diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index c8d8fe4c697b9..e72b7a670c750 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -591,6 +591,8 @@ API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,V API rule violation: names_match,k8s.io/kube-proxy/config/v1alpha1,KubeProxyConfiguration,IPTables API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,IPTablesDropBit API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,IPTablesMasqueradeBit +API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,QoSResourceManagerReconcilePeriod +API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,QoSResourceManagerResourceNamesMap API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,ResolverConfig API rule violation: names_match,k8s.io/metrics/pkg/apis/custom_metrics/v1beta1,MetricValue,WindowSeconds API rule violation: names_match,k8s.io/metrics/pkg/apis/external_metrics/v1beta1,ExternalMetricValue,WindowSeconds diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index c7a1bb2e7ac83..38e596672c94b 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -450,6 +450,8 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig fs.StringVar(&c.CPUManagerPolicy, "cpu-manager-policy", c.CPUManagerPolicy, "CPU Manager policy to use. Possible values: 'none', 'static'.") fs.Var(cliflag.NewMapStringStringNoSplit(&c.CPUManagerPolicyOptions), "cpu-manager-policy-options", "A set of key=value CPU Manager policy options to use, to fine tune their behaviour. 
If not supplied, keep the default behaviour.") fs.DurationVar(&c.CPUManagerReconcilePeriod.Duration, "cpu-manager-reconcile-period", c.CPUManagerReconcilePeriod.Duration, " CPU Manager reconciliation period. Examples: '10s', or '1m'. If not supplied, defaults to 'NodeStatusUpdateFrequency'") + fs.DurationVar(&c.QoSResourceManagerReconcilePeriod.Duration, "qos-resource-manager-reconcile-period", c.QoSResourceManagerReconcilePeriod.Duration, " QoS Resource Manager reconciliation period. Examples: '10s', or '1m'. If not supplied, defaults to 3s") + fs.Var(cliflag.NewMapStringString(&c.QoSResourceManagerResourceNamesMap), "qos-resource-manager-resource-names-map", "A set of ResourceName=ResourceName (e.g. best-effort-cpu=cpu,best-effort-memory=memory,...) pairs that map resource name \"best-effort-cpu\" to resource name \"cpu\" during QoS Resource Manager allocation period.") fs.Var(cliflag.NewMapStringString(&c.QOSReserved), "qos-reserved", " A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe how pod resource requests are reserved at the QoS level. Currently only memory is supported. Requires the QOSReserved feature gate to be enabled.") fs.StringVar(&c.TopologyManagerPolicy, "topology-manager-policy", c.TopologyManagerPolicy, "Topology Manager policy to use. Possible values: 'none', 'best-effort', 'restricted', 'single-numa-node'.") fs.DurationVar(&c.RuntimeRequestTimeout.Duration, "runtime-request-timeout", c.RuntimeRequestTimeout.Duration, "Timeout of all runtime requests except long running request - pull, logs, exec and attach. 
When timeout exceeded, kubelet will cancel the request, throw out an error and retry later.") diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 8ac37d97292cd..07d67ffbc3215 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -95,6 +95,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" utilfs "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/pkg/util/flock" + maputil "k8s.io/kubernetes/pkg/util/maps" nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/rlimit" @@ -725,17 +726,19 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend ReservedSystemCPUs: reservedSystemCPUs, HardEvictionThresholds: hardEvictionThresholds, }, - QOSReserved: *experimentalQOSReserved, - ExperimentalCPUManagerPolicy: s.CPUManagerPolicy, - ExperimentalCPUManagerPolicyOptions: cpuManagerPolicyOptions, - ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration, - ExperimentalMemoryManagerPolicy: s.MemoryManagerPolicy, - ExperimentalMemoryManagerReservedMemory: s.ReservedMemory, - ExperimentalPodPidsLimit: s.PodPidsLimit, - EnforceCPULimits: s.CPUCFSQuota, - CPUCFSQuotaPeriod: s.CPUCFSQuotaPeriod.Duration, - ExperimentalTopologyManagerPolicy: s.TopologyManagerPolicy, - ExperimentalTopologyManagerScope: s.TopologyManagerScope, + QOSReserved: *experimentalQOSReserved, + ExperimentalCPUManagerPolicy: s.CPUManagerPolicy, + ExperimentalCPUManagerPolicyOptions: cpuManagerPolicyOptions, + ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration, + ExperimentalMemoryManagerPolicy: s.MemoryManagerPolicy, + ExperimentalMemoryManagerReservedMemory: s.ReservedMemory, + ExperimentalQoSResourceManagerReconcilePeriod: s.QoSResourceManagerReconcilePeriod.Duration, + QoSResourceManagerResourceNamesMap: maputil.CopySS(s.QoSResourceManagerResourceNamesMap), + ExperimentalPodPidsLimit: s.PodPidsLimit, + EnforceCPULimits: 
s.CPUCFSQuota, + CPUCFSQuotaPeriod: s.CPUCFSQuotaPeriod.Duration, + ExperimentalTopologyManagerPolicy: s.TopologyManagerPolicy, + ExperimentalTopologyManagerScope: s.TopologyManagerScope, }, s.FailSwapOn, devicePluginEnabled, diff --git a/hack/update-generated-resource-plugin-dockerized.sh b/hack/update-generated-resource-plugin-dockerized.sh new file mode 100755 index 0000000000000..6a6ec64054b50 --- /dev/null +++ b/hack/update-generated-resource-plugin-dockerized.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script generates `*/api.pb.go` from the protobuf file `*/api.proto`. +# Example: +# kube::protoc::generate_proto "${RESOURCE_PLUGIN_ALPHA}" + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" && pwd -P)" +RESOURCE_PLUGIN_ALPHA="${KUBE_ROOT}/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + +source "${KUBE_ROOT}/hack/lib/protoc.sh" +kube::protoc::generate_proto "${RESOURCE_PLUGIN_ALPHA}" diff --git a/hack/update-generated-resource-plugin.sh b/hack/update-generated-resource-plugin.sh new file mode 100755 index 0000000000000..743b2b4f0aac7 --- /dev/null +++ b/hack/update-generated-resource-plugin.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. + +# NOTE: All output from this script needs to be copied back to the calling +# source tree. This is managed in kube::build::copy_output in build/common.sh. +# If the output set is changed update that function. + +"${KUBE_ROOT}/build/run.sh" hack/update-generated-resource-plugin-dockerized.sh "$@" diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index c1a3e6e7f9a33..37746f9289caa 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -882,6 +882,12 @@ const ( // // Enables support for time zones in CronJobs. 
CronJobTimeZone featuregate.Feature = "CronJobTimeZone" + + // owner: @sunjianyu + // alpha: v1.18 + // + // Enable qos resource managers to make NUMA aligned decisions for resources which aren't devices + QoSResourceManager featuregate.Feature = "QoSResourceManager" ) func init() { @@ -1009,6 +1015,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha}, NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha}, CronJobTimeZone: {Default: false, PreRelease: featuregate.Alpha}, + QoSResourceManager: {Default: false, PreRelease: featuregate.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index b434c31600994..9ee9a1cca7f03 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -52942,6 +52942,12 @@ func schema_k8sio_kubelet_config_v1beta1_KubeletConfiguration(ref common.Referen Format: "", }, }, + "qosResourceManagerReconcilePeriod": { + SchemaProps: spec.SchemaProps{ + Description: "QoS Resource Manager reconciliation period. Requires the QoSResourceManager feature gate to be enabled. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that shortening the period may carry a performance impact. Default: \"3s\"", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, "topologyManagerScope": { SchemaProps: spec.SchemaProps{ Description: "topologyManagerScope represents the scope of topology hint generation that topology manager requests and hint providers generate. Valid values include:\n\n- `container`: topology policy is applied on a per-container basis. 
- `pod`: topology policy is applied on a per-pod basis.\n\n\"pod\" scope requires the TopologyManager feature gate to be enabled. Default: \"container\"", diff --git a/pkg/kubelet/apis/config/helpers_test.go b/pkg/kubelet/apis/config/helpers_test.go index 36248e3e36987..5876b214cb460 100644 --- a/pkg/kubelet/apis/config/helpers_test.go +++ b/pkg/kubelet/apis/config/helpers_test.go @@ -282,5 +282,6 @@ var ( "ShutdownGracePeriod.Duration", "ShutdownGracePeriodCriticalPods.Duration", "MemoryThrottlingFactor", + "QoSResourceManagerReconcilePeriod.Duration", ) ) diff --git a/pkg/kubelet/apis/config/types.go b/pkg/kubelet/apis/config/types.go index 80f317fae5899..44ee41ae04956 100644 --- a/pkg/kubelet/apis/config/types.go +++ b/pkg/kubelet/apis/config/types.go @@ -232,6 +232,12 @@ type KubeletConfiguration struct { // MemoryManagerPolicy is the name of the policy to use. // Requires the MemoryManager feature gate to be enabled. MemoryManagerPolicy string + // QoS Resource Manager reconciliation period. + // Requires the QoSResourceManager feature gate to be enabled. + QoSResourceManagerReconcilePeriod metav1.Duration + // Map of resource name "A" to resource name "B" during QoS Resource Manager allocation period. + // It's useful for the same kind resource with different types. (eg. maps best-effort-cpu to cpu) + QoSResourceManagerResourceNamesMap map[string]string // TopologyManagerPolicy is the name of the policy to use. // Policies other than "none" require the TopologyManager feature gate to be enabled. 
TopologyManagerPolicy string diff --git a/pkg/kubelet/apis/config/v1beta1/defaults.go b/pkg/kubelet/apis/config/v1beta1/defaults.go index cbcdff636d28e..90c2e35bad3f3 100644 --- a/pkg/kubelet/apis/config/v1beta1/defaults.go +++ b/pkg/kubelet/apis/config/v1beta1/defaults.go @@ -163,6 +163,9 @@ func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfigura if obj.MemoryManagerPolicy == "" { obj.MemoryManagerPolicy = kubeletconfigv1beta1.NoneMemoryManagerPolicy } + if obj.QoSResourceManagerReconcilePeriod == zeroDuration { + obj.QoSResourceManagerReconcilePeriod = metav1.Duration{Duration: 3 * time.Second} + } if obj.TopologyManagerPolicy == "" { obj.TopologyManagerPolicy = kubeletconfigv1beta1.NoneTopologyManagerPolicy } diff --git a/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go index fa65f413e385e..3cc0def3aa512 100644 --- a/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -411,6 +411,8 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod out.MemoryManagerPolicy = in.MemoryManagerPolicy out.TopologyManagerPolicy = in.TopologyManagerPolicy + out.QoSResourceManagerReconcilePeriod = in.QoSResourceManagerReconcilePeriod + out.QoSResourceManagerResourceNamesMap = *(*map[string]string)(unsafe.Pointer(&in.QoSResourceManagerResourceNamesMap)) out.TopologyManagerScope = in.TopologyManagerScope out.QOSReserved = *(*map[string]string)(unsafe.Pointer(&in.QOSReserved)) out.RuntimeRequestTimeout = in.RuntimeRequestTimeout @@ -589,6 +591,8 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in out.CPUManagerPolicyOptions = *(*map[string]string)(unsafe.Pointer(&in.CPUManagerPolicyOptions)) out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod out.MemoryManagerPolicy = in.MemoryManagerPolicy 
+ out.QoSResourceManagerReconcilePeriod = in.QoSResourceManagerReconcilePeriod + out.QoSResourceManagerResourceNamesMap = *(*map[string]string)(unsafe.Pointer(&in.QoSResourceManagerResourceNamesMap)) out.TopologyManagerPolicy = in.TopologyManagerPolicy out.TopologyManagerScope = in.TopologyManagerScope out.QOSReserved = *(*map[string]string)(unsafe.Pointer(&in.QOSReserved)) diff --git a/pkg/kubelet/apis/config/zz_generated.deepcopy.go b/pkg/kubelet/apis/config/zz_generated.deepcopy.go index 78c79a3f0189e..aca05e3403bdd 100644 --- a/pkg/kubelet/apis/config/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/config/zz_generated.deepcopy.go @@ -210,6 +210,14 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } } out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod + out.QoSResourceManagerReconcilePeriod = in.QoSResourceManagerReconcilePeriod + if in.QoSResourceManagerResourceNamesMap != nil { + in, out := &in.QoSResourceManagerResourceNamesMap, &out.QoSResourceManagerResourceNamesMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.QOSReserved != nil { in, out := &in.QOSReserved, &out.QOSReserved *out = make(map[string]string, len(*in)) diff --git a/pkg/kubelet/apis/podresources/server_v1.go b/pkg/kubelet/apis/podresources/server_v1.go index 948aa6c91008e..8b1a08f2d575e 100644 --- a/pkg/kubelet/apis/podresources/server_v1.go +++ b/pkg/kubelet/apis/podresources/server_v1.go @@ -24,25 +24,29 @@ import ( kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/kubelet/pkg/apis/podresources/v1" + v1 "k8s.io/kubelet/pkg/apis/podresources/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + maputil "k8s.io/kubernetes/pkg/util/maps" ) // podResourcesServerV1alpha1 implements PodResourcesListerServer type v1PodResourcesServer struct { - podsProvider PodsProvider - devicesProvider DevicesProvider - cpusProvider CPUsProvider - 
memoryProvider MemoryProvider + podsProvider PodsProvider + devicesProvider DevicesProvider + cpusProvider CPUsProvider + memoryProvider MemoryProvider + resourcesProvider ResourcesProvider } // NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider // with device information provided by the DevicesProvider -func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider, cpusProvider CPUsProvider, memoryProvider MemoryProvider) v1.PodResourcesListerServer { +func NewV1PodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider, cpusProvider CPUsProvider, memoryProvider MemoryProvider, resourcesProvider ResourcesProvider) v1.PodResourcesListerServer { return &v1PodResourcesServer{ - podsProvider: podsProvider, - devicesProvider: devicesProvider, - cpusProvider: cpusProvider, - memoryProvider: memoryProvider, + podsProvider: podsProvider, + devicesProvider: devicesProvider, + cpusProvider: cpusProvider, + memoryProvider: memoryProvider, + resourcesProvider: resourcesProvider, } } @@ -54,25 +58,32 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource pods := p.podsProvider.GetPods() podResources := make([]*v1.PodResources, len(pods)) p.devicesProvider.UpdateAllocatedDevices() + p.resourcesProvider.UpdateAllocatedResources() for i, pod := range pods { pRes := v1.PodResources{ - Name: pod.Name, - Namespace: pod.Namespace, - Containers: make([]*v1.ContainerResources, len(pod.Spec.Containers)), + Name: pod.Name, + Namespace: pod.Namespace, + PodRole: pod.Labels[pluginapi.PodRoleLabelKey], + PodType: pod.Annotations[pluginapi.PodTypeAnnotationKey], + Labels: maputil.CopySS(pod.Labels), + Annotations: maputil.CopySS(pod.Annotations), + Containers: make([]*v1.ContainerResources, len(pod.Spec.Containers)), } for j, container := range pod.Spec.Containers { pRes.Containers[j] = &v1.ContainerResources{ - Name: container.Name, - Devices: 
p.devicesProvider.GetDevices(string(pod.UID), container.Name), - CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name), - Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name), + Name: container.Name, + Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name), + CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name), + Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name), + Resources: p.resourcesProvider.GetTopologyAwareResources(pod, &container), } } podResources[i] = &pRes } + // [TODO](sunjianyu): List always return error nil, but what if resource plugin rpc call gets error and Resources in ContainerResources is nil? return &v1.ListPodResourcesResponse{ PodResources: podResources, }, nil @@ -91,8 +102,9 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc() return &v1.AllocatableResourcesResponse{ - Devices: p.devicesProvider.GetAllocatableDevices(), - CpuIds: p.cpusProvider.GetAllocatableCPUs(), - Memory: p.memoryProvider.GetAllocatableMemory(), + Devices: p.devicesProvider.GetAllocatableDevices(), + CpuIds: p.cpusProvider.GetAllocatableCPUs(), + Memory: p.memoryProvider.GetAllocatableMemory(), + Resources: p.resourcesProvider.GetTopologyAwareAllocatableResources(), }, nil } diff --git a/pkg/kubelet/apis/podresources/types.go b/pkg/kubelet/apis/podresources/types.go index b6011d1297606..91978937d7b44 100644 --- a/pkg/kubelet/apis/podresources/types.go +++ b/pkg/kubelet/apis/podresources/types.go @@ -51,3 +51,13 @@ type MemoryProvider interface { // GetAllocatableMemory returns the allocatable memory from the node GetAllocatableMemory() []*podresourcesapi.ContainerMemory } + +// ResourcesProvider knows how to provide the resources used by the given container +type ResourcesProvider interface { + // UpdateAllocatedResources frees any Resources that are bound to terminated pods. 
+ UpdateAllocatedResources() + // GetResources returns information about the resources assigned to pods and containers in topology aware format + GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.TopologyAwareResource + // GetAllocatableResources returns information about all the resources known to the manager in topology aware format + GetTopologyAwareAllocatableResources() []*podresourcesapi.AllocatableTopologyAwareResource +} diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index e67dbfc3daaa8..f4c748756f571 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -27,6 +27,7 @@ import ( v1 "k8s.io/api/core/v1" internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" + resourcepluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -75,6 +76,12 @@ type ContainerManager interface { // GetCapacity returns the amount of compute resources tracked by container manager available on the node. GetCapacity() v1.ResourceList + // GetResourcePluginResourceCapacity returns the node capacity (amount of total resource plugin resources), + // node allocatable (amount of total healthy resources reported by resource plugin), + // and inactive resource plugin resources previously registered on the node. + // notice: only resources with IsNodeResource: True and IsScalarResource: True will be reported by this function. + GetResourcePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) + // GetDevicePluginResourceCapacity returns the node capacity (amount of total device plugin resources), // node allocatable (amount of total healthy resources reported by device plugin), // and inactive device plugin resources previously registered on the node. 
@@ -103,7 +110,7 @@ type ContainerManager interface { // GetPluginRegistrationHandler returns a plugin registration handler // The pluginwatcher's Handlers allow to have a single module for handling // registration. - GetPluginRegistrationHandler() cache.PluginHandler + GetPluginRegistrationHandler() map[string]cache.PluginHandler // ShouldResetExtendedResourceCapacity returns whether or not the extended resources should be zeroed, // due to node recreation. @@ -115,10 +122,15 @@ type ContainerManager interface { // GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement. GetNodeAllocatableAbsolute() v1.ResourceList + // GetResources returns ResourceRunContainerOptions with OCI resources config, annotations and envs fields populated for + // resources are managed by qos resource manager and required by container. + GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) + // Implements the podresources Provider API for CPUs, Memory and Devices podresources.CPUsProvider podresources.DevicesProvider podresources.MemoryProvider + podresources.ResourcesProvider } type NodeConfig struct { @@ -133,17 +145,19 @@ type NodeConfig struct { KubeletRootDir string ProtectKernelDefaults bool NodeAllocatableConfig - QOSReserved map[v1.ResourceName]int64 - ExperimentalCPUManagerPolicy string - ExperimentalCPUManagerPolicyOptions map[string]string - ExperimentalTopologyManagerScope string - ExperimentalCPUManagerReconcilePeriod time.Duration - ExperimentalMemoryManagerPolicy string - ExperimentalMemoryManagerReservedMemory []kubeletconfig.MemoryReservation - ExperimentalPodPidsLimit int64 - EnforceCPULimits bool - CPUCFSQuotaPeriod time.Duration - ExperimentalTopologyManagerPolicy string + QOSReserved map[v1.ResourceName]int64 + ExperimentalCPUManagerPolicy string + ExperimentalCPUManagerPolicyOptions map[string]string + ExperimentalTopologyManagerScope 
string + ExperimentalCPUManagerReconcilePeriod time.Duration + ExperimentalMemoryManagerPolicy string + ExperimentalMemoryManagerReservedMemory []kubeletconfig.MemoryReservation + ExperimentalQoSResourceManagerReconcilePeriod time.Duration + QoSResourceManagerResourceNamesMap map[string]string + ExperimentalPodPidsLimit int64 + EnforceCPULimits bool + CPUCFSQuotaPeriod time.Duration + ExperimentalTopologyManagerPolicy string } type NodeAllocatableConfig struct { @@ -195,6 +209,78 @@ func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) { return &reservations, nil } +func containerResourcesFromResourceManagerAllocatableResponse(res *resourcepluginapi.GetTopologyAwareAllocatableResourcesResponse) []*podresourcesapi.AllocatableTopologyAwareResource { + if res == nil { + return nil + } + + result := make([]*podresourcesapi.AllocatableTopologyAwareResource, 0, len(res.AllocatableResources)) + + for resourceName, resource := range res.AllocatableResources { + if resource == nil { + continue + } + + result = append(result, &podresourcesapi.AllocatableTopologyAwareResource{ + ResourceName: resourceName, + IsNodeResource: resource.IsNodeResource, + IsScalarResource: resource.IsScalarResource, + AggregatedAllocatableQuantity: resource.AggregatedAllocatableQuantity, + TopologyAwareAllocatableQuantityList: transformTopologyAwareQuantity(resource.TopologyAwareAllocatableQuantityList), + AggregatedCapacityQuantity: resource.AggregatedCapacityQuantity, + TopologyAwareCapacityQuantityList: transformTopologyAwareQuantity(resource.TopologyAwareCapacityQuantityList), + }) + } + + return result +} + +func containerResourcesFromResourceManagerResponse(res *resourcepluginapi.GetTopologyAwareResourcesResponse) []*podresourcesapi.TopologyAwareResource { + if res == nil || + res.ContainerTopologyAwareResources == nil { + return nil + } + + result := make([]*podresourcesapi.TopologyAwareResource, 0, len(res.ContainerTopologyAwareResources.AllocatedResources)) + + 
for resourceName, resource := range res.ContainerTopologyAwareResources.AllocatedResources { + if resource == nil { + continue + } + + result = append(result, &podresourcesapi.TopologyAwareResource{ + ResourceName: resourceName, + IsNodeResource: resource.IsNodeResource, + IsScalarResource: resource.IsScalarResource, + AggregatedQuantity: resource.AggregatedQuantity, + OriginalAggregatedQuantity: resource.OriginalAggregatedQuantity, + TopologyAwareQuantityList: transformTopologyAwareQuantity(resource.TopologyAwareQuantityList), + OriginalTopologyAwareQuantityList: transformTopologyAwareQuantity(resource.OriginalTopologyAwareQuantityList), + }) + } + + return result +} + +func transformTopologyAwareQuantity(pluginAPITopologyAwareQuantityList []*resourcepluginapi.TopologyAwareQuantity) []*podresourcesapi.TopologyAwareQuantity { + if pluginAPITopologyAwareQuantityList == nil { + return nil + } + + topologyAwareQuantityList := make([]*podresourcesapi.TopologyAwareQuantity, 0, len(pluginAPITopologyAwareQuantityList)) + + for _, topologyAwareQuantity := range pluginAPITopologyAwareQuantityList { + if topologyAwareQuantity != nil { + topologyAwareQuantityList = append(topologyAwareQuantityList, &podresourcesapi.TopologyAwareQuantity{ + ResourceValue: topologyAwareQuantity.ResourceValue, + Node: topologyAwareQuantity.Node, + }) + } + } + + return topologyAwareQuantityList +} + func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*podresourcesapi.ContainerDevices { var respDevs []*podresourcesapi.ContainerDevices diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 0f09f3eb33121..a4751f9ad169f 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -46,6 +46,7 @@ import ( "k8s.io/client-go/tools/record" utilsysctl "k8s.io/component-helpers/node/util/sysctl" internalapi "k8s.io/cri-api/pkg/apis" + pluginwatcherapi 
"k8s.io/kubelet/pkg/apis/pluginregistration/v1" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/cadvisor" @@ -55,6 +56,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" memorymanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" + "k8s.io/kubernetes/pkg/kubelet/cm/qosresourcemanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" "k8s.io/kubernetes/pkg/kubelet/config" @@ -122,6 +124,8 @@ type containerManagerImpl struct { qosContainerManager QOSContainerManager // Interface for exporting and allocating devices reported by device plugins. deviceManager devicemanager.Manager + // Interface for exporting and allocating resources reported by resource plugins. + qosResourceManager qosresourcemanager.Manager // Interface for CPU affinity management. cpuManager cpumanager.Manager // Interface for memory affinity management. 
@@ -345,6 +349,18 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I cm.topologyManager.AddHintProvider(cm.memoryManager) } + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.QoSResourceManager) { + klog.Infof("QosResourceManager enabled, added as a provider for topology manager") + cm.qosResourceManager, err = qosresourcemanager.NewManagerImpl(cm.topologyManager, nodeConfig.ExperimentalQoSResourceManagerReconcilePeriod, nodeConfig.QoSResourceManagerResourceNamesMap) + if err != nil { + klog.Errorf("failed to initialize qos resource manager: %v", err) + return nil, err + } + cm.topologyManager.AddHintProvider(cm.qosResourceManager) + } else { + cm.qosResourceManager, err = qosresourcemanager.NewManagerStub() + } + return cm, nil } @@ -574,6 +590,13 @@ func (cm *containerManagerImpl) Start(node *v1.Node, } } + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.QoSResourceManager) { + err := cm.qosResourceManager.Start(qosresourcemanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService) + if err != nil { + return fmt.Errorf("start QoSResourceManager error: %v", err) + } + } + // cache the node Info including resource capacity and // allocatable of the node cm.nodeInfo = node @@ -638,8 +661,11 @@ func (cm *containerManagerImpl) Start(node *v1.Node, return nil } -func (cm *containerManagerImpl) GetPluginRegistrationHandler() cache.PluginHandler { - return cm.deviceManager.GetWatcherHandler() +func (cm *containerManagerImpl) GetPluginRegistrationHandler() map[string]cache.PluginHandler { + return map[string]cache.PluginHandler{ + pluginwatcherapi.DevicePlugin: cm.deviceManager.GetWatcherHandler(), + pluginwatcherapi.ResourcePlugin: cm.qosResourceManager.GetWatcherHandler(), + } } // TODO: move the GetResources logic to PodContainerManager. 
@@ -918,6 +944,10 @@ func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { return cm.capacity } +func (cm *containerManagerImpl) GetResourcePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return cm.qosResourceManager.GetCapacity() +} + func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { return cm.deviceManager.GetCapacity() } @@ -961,7 +991,8 @@ func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.Contai } func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool { - return cm.deviceManager.ShouldResetExtendedResourceCapacity() + // [TODO](sunjianyu): need we identify resources managed by device manager or qos resource manager and deal with them respectively? + return cm.deviceManager.ShouldResetExtendedResourceCapacity() || cm.qosResourceManager.ShouldResetExtendedResourceCapacity() } func (cm *containerManagerImpl) UpdateAllocatedDevices() { @@ -989,3 +1020,40 @@ func containerMemoryFromBlock(blocks []memorymanagerstate.Block) []*podresources return containerMemories } + +func (cm *containerManagerImpl) GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) { + return cm.qosResourceManager.GetResourceRunContainerOptions(pod, container) +} + +// GetTopologyAwareResources returns information about the resources assigned to pods and containers in topology aware format +func (cm *containerManagerImpl) GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.TopologyAwareResource { + if pod == nil || container == nil { + klog.Errorf("GetTopologyAwareResources got nil pod: %v or container: %v", pod, container) + return nil + } + + resp, err := cm.qosResourceManager.GetTopologyAwareResources(pod, container) + + if err != nil { + klog.Errorf("qos resource manager GetTopologyAwareResources for pod: %s container: %s failed with error: %v", pod.UID, 
container.Name, err) + return nil + } + + return containerResourcesFromResourceManagerResponse(resp) +} + +// GetTopologyAwareAllocatableResources returns information about all the resources known to the manager in topology aware format +func (cm *containerManagerImpl) GetTopologyAwareAllocatableResources() []*podresourcesapi.AllocatableTopologyAwareResource { + resp, err := cm.qosResourceManager.GetTopologyAwareAllocatableResources() + + if err != nil { + klog.Errorf("qos resource manager GetTopologyAwareAllocatableResources failed with error: %v", err) + return nil + } + + return containerResourcesFromResourceManagerAllocatableResponse(resp) +} + +func (cm *containerManagerImpl) UpdateAllocatedResources() { + cm.qosResourceManager.UpdateAllocatedResources() +} diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index a5b0e523b1377..fccfb4ccdf162 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -83,7 +83,7 @@ func (cm *containerManagerStub) GetCapacity() v1.ResourceList { return c } -func (cm *containerManagerStub) GetPluginRegistrationHandler() cache.PluginHandler { +func (cm *containerManagerStub) GetPluginRegistrationHandler() map[string]cache.PluginHandler { return nil } @@ -91,6 +91,10 @@ func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceLi return cm.extendedPluginResources, cm.extendedPluginResources, []string{} } +func (cm *containerManagerStub) GetResourcePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} +} + func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager { return &podContainerManagerStub{} } @@ -131,6 +135,10 @@ func (cm *containerManagerStub) UpdateAllocatedDevices() { return } +func (cm *containerManagerStub) GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) { + return 
&kubecontainer.ResourceRunContainerOptions{}, nil +} + func (cm *containerManagerStub) GetCPUs(_, _ string) []int64 { return nil } @@ -151,6 +159,17 @@ func (cm *containerManagerStub) GetNodeAllocatableAbsolute() v1.ResourceList { return nil } +func (cm *containerManagerStub) GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.TopologyAwareResource { + return nil +} + +func (cm *containerManagerStub) GetTopologyAwareAllocatableResources() []*podresourcesapi.AllocatableTopologyAwareResource { + return nil +} + +func (cm *containerManagerStub) UpdateAllocatedResources() { +} + func NewStubContainerManager() ContainerManager { return &containerManagerStub{shouldResetExtendedResourceCapacity: false} } diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index ec56fcf2c73b6..44510e5fc750e 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ b/pkg/kubelet/cm/container_manager_windows.go @@ -183,6 +183,10 @@ func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceLi return cm.deviceManager.GetCapacity() } +func (cm *containerManagerImpl) GetResourcePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} +} + func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager { return &podContainerManagerStub{} } @@ -255,3 +263,16 @@ func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.Contai func (cm *containerManagerImpl) GetNodeAllocatableAbsolute() v1.ResourceList { return nil } + +// GetResources returns information about the resources assigned to pods and containers in topology aware format +func (cm *containerManagerImpl) GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.TopologyAwareResource { + return 
nil +} + +// GetAllocatableResources returns information about all the resources known to the manager in topology aware format +func (cm *containerManagerImpl) GetTopologyAwareAllocatableResources() []*podresourcesapi.AllocatableTopologyAwareResource { + return nil +} + +func (cm *containerManagerImpl) UpdateAllocatedResources() { +} diff --git a/pkg/kubelet/cm/fake_container_manager.go b/pkg/kubelet/cm/fake_container_manager.go index c907301a6d4e4..f943d0db45ec7 100644 --- a/pkg/kubelet/cm/fake_container_manager.go +++ b/pkg/kubelet/cm/fake_container_manager.go @@ -118,7 +118,42 @@ func (cm *FakeContainerManager) GetCapacity() v1.ResourceList { return c } -func (cm *FakeContainerManager) GetPluginRegistrationHandler() cache.PluginHandler { +func (cm *FakeContainerManager) UpdateAllocatedResources() { + cm.Lock() + defer cm.Unlock() + cm.CalledFunctions = append(cm.CalledFunctions, "UpdateAllocatedResources") + return +} + +func (cm *FakeContainerManager) GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.TopologyAwareResource { + cm.Lock() + defer cm.Unlock() + cm.CalledFunctions = append(cm.CalledFunctions, "GetTopologyAwareResources") + return nil +} + +func (cm *FakeContainerManager) GetTopologyAwareAllocatableResources() []*podresourcesapi.AllocatableTopologyAwareResource { + cm.Lock() + defer cm.Unlock() + cm.CalledFunctions = append(cm.CalledFunctions, "GetTopologyAwareAllocatableResources") + return nil +} + +func (cm *FakeContainerManager) GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) { + cm.Lock() + defer cm.Unlock() + cm.CalledFunctions = append(cm.CalledFunctions, "GetResourceRunContainerOptions") + return &kubecontainer.ResourceRunContainerOptions{}, nil +} + +func (cm *FakeContainerManager) GetResourcePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + cm.Lock() + defer cm.Unlock() + cm.CalledFunctions = 
append(cm.CalledFunctions, "GetResourcePluginResourceCapacity") + return nil, nil, []string{} +} + +func (cm *FakeContainerManager) GetPluginRegistrationHandler() map[string]cache.PluginHandler { cm.Lock() defer cm.Unlock() cm.CalledFunctions = append(cm.CalledFunctions, "GetPluginRegistrationHandler") diff --git a/pkg/kubelet/cm/qosresourcemanager/BUILD b/pkg/kubelet/cm/qosresourcemanager/BUILD new file mode 100644 index 0000000000000..89a6e36e86f66 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/BUILD @@ -0,0 +1,104 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "constants.go", + "endpoint.go", + "manager.go", + "manager_stub.go", + "pod_resources.go", + "resource_plugin_stub.go", + "topology_hints.go", + "types.go", + "utils.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/qosresourcemanager", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/pod:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/kubelet/checkpointmanager:go_default_library", + "//pkg/kubelet/checkpointmanager/errors:go_default_library", + "//pkg/kubelet/cm/cpumanager/topology:go_default_library", + "//pkg/kubelet/cm/qosresourcemanager/checkpoint:go_default_library", + "//pkg/kubelet/cm/topologymanager:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", + "//pkg/kubelet/config:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/lifecycle:go_default_library", + "//pkg/kubelet/metrics:go_default_library", + "//pkg/kubelet/pluginmanager/cache:go_default_library", + "//pkg/kubelet/status:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//pkg/util/selinux:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubelet/cm/qosresourcemanager/checkpoint:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "endpoint_test.go", + "manager_test.go", + "pod_resources_test.go", + "topology_hints_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/api/pod:go_default_library", + "//pkg/kubelet/checkpointmanager:go_default_library", + "//pkg/kubelet/cm/topologymanager:go_default_library", + "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library", + "//pkg/kubelet/config:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/lifecycle:go_default_library", + "//pkg/kubelet/pluginmanager:go_default_library", + "//pkg/kubelet/status:go_default_library", + "//pkg/kubelet/status/testing:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
+ "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", + "//staging/src/k8s.io/cri-api/pkg/apis/testing:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library", + "//staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) diff --git a/pkg/kubelet/cm/qosresourcemanager/OWNERS b/pkg/kubelet/cm/qosresourcemanager/OWNERS new file mode 100644 index 0000000000000..f613620e44158 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/OWNERS @@ -0,0 +1,3 @@ +approvers: +- sunjianyu +- shaowei diff --git a/pkg/kubelet/cm/qosresourcemanager/checkpoint/BUILD b/pkg/kubelet/cm/qosresourcemanager/checkpoint/BUILD new file mode 100644 index 0000000000000..48f2efdf4c2b1 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/checkpoint/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["checkpoint.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/qosresourcemanager/checkpoint", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubelet/checkpointmanager:go_default_library", + "//pkg/kubelet/checkpointmanager/checksum:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + 
visibility = ["//visibility:public"], +) diff --git a/pkg/kubelet/cm/qosresourcemanager/checkpoint/checkpoint.go b/pkg/kubelet/cm/qosresourcemanager/checkpoint/checkpoint.go new file mode 100644 index 0000000000000..86a8d60f9c38d --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/checkpoint/checkpoint.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package checkpoint + +import ( + "encoding/json" + + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum" +) + +// ResourceManagerCheckpoint defines the operations to retrieve pod resources +type ResourceManagerCheckpoint interface { + checkpointmanager.Checkpoint + GetData() []PodResourcesEntry +} + +// PodResourcesEntry connects pod information to resources +type PodResourcesEntry struct { + PodUID string + ContainerName string + ResourceName string + AllocationInfo string +} + +// checkpointData struct is used to store pod to resource allocation information +// in a checkpoint file. +// TODO: add version control when we need to change checkpoint format. 
+type checkpointData struct { + PodResourceEntries []PodResourcesEntry +} + +// Data holds checkpoint data and its checksum +type Data struct { + Data checkpointData + Checksum checksum.Checksum +} + +// New returns an instance of Checkpoint +func New(resEntries []PodResourcesEntry) ResourceManagerCheckpoint { + return &Data{ + Data: checkpointData{ + PodResourceEntries: resEntries, + }, + } +} + +// MarshalCheckpoint returns marshalled data +func (cp *Data) MarshalCheckpoint() ([]byte, error) { + cp.Checksum = checksum.New(cp.Data) + return json.Marshal(*cp) +} + +// UnmarshalCheckpoint returns unmarshalled data +func (cp *Data) UnmarshalCheckpoint(blob []byte) error { + return json.Unmarshal(blob, cp) +} + +// VerifyChecksum verifies that passed checksum is same as calculated checksum +func (cp *Data) VerifyChecksum() error { + return cp.Checksum.Verify(cp.Data) +} + +// GetData returns resource entries and registered resources +func (cp *Data) GetData() []PodResourcesEntry { + return cp.Data.PodResourceEntries +} diff --git a/pkg/kubelet/cm/qosresourcemanager/constants.go b/pkg/kubelet/cm/qosresourcemanager/constants.go new file mode 100644 index 0000000000000..e3a2a46f92cef --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/constants.go @@ -0,0 +1,6 @@ +package qosresourcemanager + +const ( + MainContainerNameAnnotationKey = "kubernetes.io/main-container-name" + DaemonsetKind = "DaemonSet" +) diff --git a/pkg/kubelet/cm/qosresourcemanager/endpoint.go b/pkg/kubelet/cm/qosresourcemanager/endpoint.go new file mode 100644 index 0000000000000..7ff2067e8a92e --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/endpoint.go @@ -0,0 +1,205 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package qosresourcemanager + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" +) + +// endpoint maps to a single registered resource plugin. It is responsible +// for managing gRPC communications with the resource plugin and caching +// resource states reported by the resource plugin. +type endpoint interface { + // [TODO] if we need list&watch resource plugin, + // then we need run function. + //run(success chan<- bool) + stop() + allocate(c context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) + getTopologyHints(c context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceHintsResponse, error) + getTopologyAwareAllocatableResources(c context.Context, request *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) + getTopologyAwareResources(c context.Context, request *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) + preStartContainer(pod *v1.Pod, container *v1.Container) (*pluginapi.PreStartContainerResponse, error) + getResourceAllocation(c context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) + removePod(c context.Context, removePodRequest *pluginapi.RemovePodRequest) (*pluginapi.RemovePodResponse, error) + isStopped() bool + stopGracePeriodExpired() bool +} + +type 
endpointImpl struct { + client pluginapi.ResourcePluginClient + clientConn *grpc.ClientConn + + socketPath string + resourceName string + stopTime time.Time + + mutex sync.Mutex +} + +// newEndpointImpl creates a new endpoint for the given resourceName. +// This is to be used during normal resource plugin registration. +func newEndpointImpl(socketPath, resourceName string) (*endpointImpl, error) { + client, c, err := dial(socketPath) + if err != nil { + klog.Errorf("[qosresourcemanager] Can't create new endpoint with path %s err %v", socketPath, err) + return nil, err + } + + return &endpointImpl{ + client: client, + clientConn: c, + + socketPath: socketPath, + resourceName: resourceName, + }, nil +} + +// newStoppedEndpointImpl creates a new endpoint for the given resourceName with stopTime set. +// This is to be used during Kubelet restart, before the actual resource plugin re-registers. +func newStoppedEndpointImpl(resourceName string) *endpointImpl { + return &endpointImpl{ + resourceName: resourceName, + stopTime: time.Now(), + } +} + +func (e *endpointImpl) isStopped() bool { + e.mutex.Lock() + defer e.mutex.Unlock() + return !e.stopTime.IsZero() +} + +func (e *endpointImpl) stopGracePeriodExpired() bool { + e.mutex.Lock() + defer e.mutex.Unlock() + return !e.stopTime.IsZero() && time.Since(e.stopTime) > endpointStopGracePeriod +} + +// used for testing only +func (e *endpointImpl) setStopTime(t time.Time) { + e.mutex.Lock() + defer e.mutex.Unlock() + e.stopTime = t +} + +// allocate issues Allocate gRPC call to the resource plugin. 
+func (e *endpointImpl) allocate(c context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := context.WithTimeout(c, pluginapi.KubeletResourcePluginAllocateRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.Allocate(ctx, resourceRequest) +} + +// getTopologyHints issues GetTopologyHints gRPC call to the resource plugin. +func (e *endpointImpl) getTopologyHints(c context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceHintsResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := context.WithTimeout(c, pluginapi.KubeletResourcePluginGetTopologyHintsRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.GetTopologyHints(ctx, resourceRequest) +} + +// preStartContainer issues PreStartContainer gRPC call to the resource plugin. +func (e *endpointImpl) preStartContainer(pod *v1.Pod, container *v1.Container) (*pluginapi.PreStartContainerResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := context.WithTimeout(context.Background(), pluginapi.KubeletResourcePluginPreStartContainerRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.PreStartContainer(ctx, &pluginapi.PreStartContainerRequest{ + PodUid: string(pod.UID), + PodNamespace: pod.Namespace, + PodName: pod.Name, + ContainerName: container.Name, + }) +} + +func (e *endpointImpl) stop() { + e.mutex.Lock() + defer e.mutex.Unlock() + if e.clientConn != nil { + e.clientConn.Close() + } + e.stopTime = time.Now() +} + +func (e *endpointImpl) getTopologyAwareAllocatableResources(c context.Context, request *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := 
context.WithTimeout(c, pluginapi.KubeletResourcePluginGetTopologyAwareAllocatableResourcesRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.GetTopologyAwareAllocatableResources(ctx, request) +} + +func (e *endpointImpl) getTopologyAwareResources(c context.Context, request *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := context.WithTimeout(c, pluginapi.KubeletResourcePluginGetTopologyAwareResourcesRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.GetTopologyAwareResources(ctx, request) +} + +func (e *endpointImpl) getResourceAllocation(c context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := context.WithTimeout(c, pluginapi.KubeletResourcePluginGetResourcesAllocationRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.GetResourcesAllocation(ctx, request) +} + +func (e *endpointImpl) removePod(c context.Context, removePodRequest *pluginapi.RemovePodRequest) (*pluginapi.RemovePodResponse, error) { + if e.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, e) + } + ctx, cancel := context.WithTimeout(c, pluginapi.KubeletResourcePluginRemovePodRPCTimeoutInSecs*time.Second) + defer cancel() + return e.client.RemovePod(ctx, removePodRequest) +} + +// dial establishes the gRPC communication with the registered resource plugin. 
https://godoc.org/google.golang.org/grpc#Dial +func dial(unixSocketPath string) (pluginapi.ResourcePluginClient, *grpc.ClientConn, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c, err := grpc.DialContext(ctx, unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", addr) + }), + ) + + if err != nil { + return nil, nil, fmt.Errorf(errFailedToDialResourcePlugin+" %v", err) + } + + return pluginapi.NewResourcePluginClient(c), c, nil +} diff --git a/pkg/kubelet/cm/qosresourcemanager/endpoint_test.go b/pkg/kubelet/cm/qosresourcemanager/endpoint_test.go new file mode 100644 index 0000000000000..c7f5be9ba13a5 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/endpoint_test.go @@ -0,0 +1,112 @@ +package qosresourcemanager + +import ( + "context" + "path" + "testing" + + "github.com/stretchr/testify/require" + + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" +) + +var ( + eSocketName = "mock.sock" +) + +func TestNewEndpoint(t *testing.T) { + socket := path.Join("/tmp", eSocketName) + + p, e := eSetup(t, socket, "mock") + defer eCleanup(t, p, e) +} + +func TestAllocate(t *testing.T) { + socket := path.Join("/tmp", eSocketName) + p, e := eSetup(t, socket, "mock") + defer eCleanup(t, p, e) + + req := generateResourceRequest() + resp := generateResourceResponse() + + p.SetAllocFunc(func(r *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) { + return resp, nil + }) + + respOut, err := e.allocate(context.TODO(), req) + require.NoError(t, err) + require.Equal(t, resp, respOut) +} + +func generateResourceRequest() *pluginapi.ResourceRequest { + return &pluginapi.ResourceRequest{ + PodUid: "mock_pod", + PodNamespace: "mock_pod_ns", + PodName: "mock_pod_name", + ContainerName: "mock_con_name", + //IsInitContainer: false, + PodRole: "mock_role", + PodType: 
"mock_type", + ResourceName: "mock_res", + Hint: &pluginapi.TopologyHint{ + Nodes: []uint64{0, 1}, + Preferred: true, + }, + ResourceRequests: map[string]float64{ + "mock_res": 2, + }, + } +} + +func generateResourceResponse() *pluginapi.ResourceAllocationResponse { + return &pluginapi.ResourceAllocationResponse{ + PodUid: "mock_pod", + PodNamespace: "mock_pod_ns", + PodName: "mock_pod_name", + ContainerName: "mock_con_name", + //IsInitContainer: false, + PodRole: "mock_role", + PodType: "mock_type", + ResourceName: "mock_res", + AllocationResult: &pluginapi.ResourceAllocation{ + ResourceAllocation: map[string]*pluginapi.ResourceAllocationInfo{ + "mock_res": generateResourceAllocationInfo(), + }, + }, + } +} + +// todo, only contains setAllocFunc, so we can't mock the testing cases for other RPC calls +// actually, since endpoint for qrm only performs as a pure proxy, there is no need to add +// more test cases before its interface changes to list-watch +func generateGetTopologyAwareAllocatableResourcesRequest() *pluginapi.GetTopologyAwareAllocatableResourcesRequest { + return &pluginapi.GetTopologyAwareAllocatableResourcesRequest{} +} + +func generateGetTopologyAwareResourcesRequest() *pluginapi.GetTopologyAwareResourcesRequest { + return &pluginapi.GetTopologyAwareResourcesRequest{ + PodUid: "mock_pod", + ContainerName: "mock_con_name", + } +} + +func generateGetResourcesAllocationRequest() *pluginapi.GetResourcesAllocationRequest { + return &pluginapi.GetResourcesAllocationRequest{} +} + +func eSetup(t *testing.T, socket, resourceName string) (*Stub, *endpointImpl) { + p := NewResourcePluginStub(socket, resourceName, false) + + err := p.Start() + require.NoError(t, err) + + e, err := newEndpointImpl(socket, resourceName) + require.NoError(t, err) + + return p, e +} + +func eCleanup(t *testing.T, p *Stub, e *endpointImpl) { + p.Stop() + e.stop() +} diff --git a/pkg/kubelet/cm/qosresourcemanager/manager.go b/pkg/kubelet/cm/qosresourcemanager/manager.go new file 
mode 100644 index 0000000000000..8fbef63043e70 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/manager.go @@ -0,0 +1,1261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package qosresourcemanager + +import ( + "context" + "fmt" + "math" + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "k8s.io/klog/v2" + + "github.com/opencontainers/selinux/go-selinux" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + errorsutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" + "k8s.io/kubernetes/pkg/kubelet/cm/qosresourcemanager/checkpoint" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" + "k8s.io/kubernetes/pkg/kubelet/status" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + schedutil "k8s.io/kubernetes/pkg/scheduler/util" + maputil "k8s.io/kubernetes/pkg/util/maps" +) + +// ManagerImpl is the structure in charge of managing 
Resource Plugins. +type ManagerImpl struct { + socketname string + socketdir string + + endpoints map[string]endpointInfo // Key is ResourceName + + // lock when accessing endpoints and allocatedScalarResourcesQuantity + mutex sync.Mutex + + server *grpc.Server + wg sync.WaitGroup + + // activePods is a method for listing active pods on the node + // so the amount of pluginResources requested by existing pods + // could be counted when updating allocated resources + activePods ActivePodsFunc + + // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. + // We use it to determine when we can purge inactive pods from checkpointed state. + sourcesReady config.SourcesReady + + // contains allocated scalar resources quantity, keyed by resourceName. + allocatedScalarResourcesQuantity map[string]float64 + + // podResources contains pod to allocated resources mapping. + podResources *podResourcesChk + checkpointManager checkpointmanager.CheckpointManager + + // Store of Topology Affinities that the Resource Manager can query. + topologyAffinityStore topologymanager.Store + + // podStatusProvider provides a method for obtaining pod statuses + // and the containerID of their containers + podStatusProvider status.PodStatusProvider + + // containerRuntime is the container runtime service interface needed + // to make UpdateContainerResources() calls against the containers. + containerRuntime runtimeService + + // reconcilePeriod is the duration between calls to reconcileState. + reconcilePeriod time.Duration + + // Map of resource name "A" to resource name "B" during QoS Resource Manager allocation period. + // It's useful for the same kind resource with different types. (eg. 
maps best-effort-cpu to cpu) + resourceNamesMap map[string]string +} + +type endpointInfo struct { + e endpoint + opts *pluginapi.ResourcePluginOptions +} + +type sourcesReadyStub struct{} + +// PodReusableResources is a map by pod uid of resources to reuse. +type PodReusableResources map[string]ResourceAllocation + +func (s *sourcesReadyStub) AddSource(source string) {} +func (s *sourcesReadyStub) AllReady() bool { return true } + +// NewManagerImpl creates a new manager. +func NewManagerImpl(topologyAffinityStore topologymanager.Store, reconcilePeriod time.Duration, resourceNamesMap map[string]string) (Manager, error) { + return newManagerImpl(pluginapi.KubeletSocket, topologyAffinityStore, reconcilePeriod, resourceNamesMap) +} + +func newManagerImpl(socketPath string, topologyAffinityStore topologymanager.Store, reconcilePeriod time.Duration, resourceNamesMap map[string]string) (*ManagerImpl, error) { + klog.V(2).Infof("[qosresourcemanager] Creating Resource Plugin manager at %s", socketPath) + + if socketPath == "" || !filepath.IsAbs(socketPath) { + return nil, fmt.Errorf(errBadSocket+" %s", socketPath) + } + + dir, file := filepath.Split(socketPath) + manager := &ManagerImpl{ + endpoints: make(map[string]endpointInfo), + + socketname: file, + socketdir: dir, + topologyAffinityStore: topologyAffinityStore, + podResources: newPodResourcesChk(), + allocatedScalarResourcesQuantity: make(map[string]float64), + reconcilePeriod: reconcilePeriod, + resourceNamesMap: resourceNamesMap, + } + + // The following structures are populated with real implementations in manager.Start() + // Before that, initializes them to perform no-op operations. 
+ manager.activePods = func() []*v1.Pod { return []*v1.Pod{} } + manager.sourcesReady = &sourcesReadyStub{} + + checkpointManager, err := checkpointmanager.NewCheckpointManager(dir) + if err != nil { + return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err) + } + manager.checkpointManager = checkpointManager + + return manager, nil +} + +func (m *ManagerImpl) removeContents(dir string) error { + d, err := os.Open(dir) + if err != nil { + return err + } + defer d.Close() + names, err := d.Readdirnames(-1) + if err != nil { + return err + } + var errs []error + for _, name := range names { + filePath := filepath.Join(dir, name) + if filePath == m.checkpointFile() { + continue + } + stat, err := os.Stat(filePath) + if err != nil { + klog.Errorf("[qosresourcemanager] Failed to stat file %s: %v", filePath, err) + continue + } + if stat.IsDir() { + continue + } + err = os.RemoveAll(filePath) + if err != nil { + errs = append(errs, err) + klog.Errorf("[qosresourcemanager] Failed to remove file %s: %v", filePath, err) + continue + } + } + return errorsutil.NewAggregate(errs) +} + +// checkpointFile returns resource plugin checkpoint file path. +func (m *ManagerImpl) checkpointFile() string { + return filepath.Join(m.socketdir, kubeletQoSResourceManagerCheckpoint) +} + +// Start starts the QoS Resource Plugin Manager and start initialization of +// podResources and allocatedScalarResourcesQuantity information from checkpointed state and +// starts resource plugin registration service. +func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) error { + klog.V(2).Infof("[qosresourcemanager] Starting Resource Plugin manager") + + m.activePods = activePods + m.sourcesReady = sourcesReady + m.podStatusProvider = podStatusProvider + m.containerRuntime = containerRuntime + + // Loads in podResources information from disk. 
+ err := m.readCheckpoint() + if err != nil { + klog.Warningf("[qosresourcemanager] Continue after failing to read checkpoint file. Resource allocation info may NOT be up-to-date. Err: %v", err) + } + + socketPath := filepath.Join(m.socketdir, m.socketname) + + if err = os.MkdirAll(m.socketdir, 0750); err != nil { + return err + } + if selinux.GetEnabled() { + if err := selinux.SetFileLabel(m.socketdir, config.KubeletPluginsDirSELinuxLabel); err != nil { + klog.Warningf("[qosresourcemanager] Unprivileged containerized plugins might not work. Could not set selinux context on %s: %v", m.socketdir, err) + } + } + + // Removes all stale sockets in m.socketdir. Resource plugins can monitor + // this and use it as a signal to re-register with the new Kubelet. + if err := m.removeContents(m.socketdir); err != nil { + klog.Errorf("[qosresourcemanager] Fail to clean up stale contents under %s: %v", m.socketdir, err) + } + + s, err := net.Listen("unix", socketPath) + if err != nil { + klog.Errorf(errListenSocket+" %v", err) + return err + } + + m.wg.Add(1) + m.server = grpc.NewServer([]grpc.ServerOption{}...) 
+ + pluginapi.RegisterRegistrationServer(m.server, m) + + ctx, cancel := context.WithCancel(context.Background()) + + klog.V(2).Infof("[qosresourcemanager] Serving resource plugin registration server on %q", socketPath) + go func() { + defer func() { + m.wg.Done() + cancel() + + if err := recover(); err != nil { + klog.Fatalf("[qosresourcemanager] Start recover from err: %v", err) + } + }() + m.server.Serve(s) + }() + + klog.Infof("[qosresourcemanager] reconciling every %v", m.reconcilePeriod) + + // Periodically call m.reconcileState() to continue to keep the resources allocation for + // all active pods in sync with the latest result allocated by corresponding resource plugin + go wait.Until(func() { m.reconcileState() }, m.reconcilePeriod, ctx.Done()) + + return nil +} + +// GetWatcherHandler returns the plugin handler +func (m *ManagerImpl) GetWatcherHandler() cache.PluginHandler { + if f, err := os.Create(m.socketdir + "DEPRECATION"); err != nil { + klog.Errorf("Failed to create deprecation file at %s", m.socketdir) + } else { + f.Close() + klog.V(4).Infof("created deprecation file %s", f.Name()) + } + + return cache.PluginHandler(m) +} + +// ValidatePlugin validates a plugin if the version is correct and the name has the format of an extended resource +func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string) error { + klog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions) + + if !m.isVersionCompatibleWithPlugin(versions) { + return fmt.Errorf("manager version, %s, is not among plugin supported versions %v", pluginapi.Version, versions) + } + + return nil +} + +// RegisterPlugin starts the endpoint and registers it +func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string, versions []string) error { + klog.V(2).Infof("[qosresourcemanager] Registering Plugin %s at endpoint %s", pluginName, endpoint) + + e, err := newEndpointImpl(endpoint, pluginName) + if err != nil { + 
return fmt.Errorf("[qosresourcemanager] failed to dial resource plugin with socketPath %s: %v", endpoint, err) + } + + options, err := e.client.GetResourcePluginOptions(context.Background(), &pluginapi.Empty{}) + if err != nil { + return fmt.Errorf("[qosresourcemanager] failed to get resource plugin options: %v", err) + } + + m.registerEndpoint(pluginName, options, e) + + return nil +} + +// DeRegisterPlugin deregisters the plugin +// TODO work on the behavior for deregistering plugins +// e.g: Should we delete the resource +func (m *ManagerImpl) DeRegisterPlugin(pluginName string) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if eI, ok := m.endpoints[pluginName]; ok { + eI.e.stop() + } +} + +func (m *ManagerImpl) isVersionCompatibleWithPlugin(versions []string) bool { + // TODO(sunjianyu): Currently this is fine as we only have a single supported version. When we do need to support + // multiple versions in the future, we may need to extend this function to return a supported version. + // E.g., say kubelet supports v1beta1 and v1beta2, and we get v1alpha1 and v1beta1 from a resource plugin, + // this function should return v1beta1 + for _, version := range versions { + for _, supportedVersion := range pluginapi.SupportedVersions { + if version == supportedVersion { + return true + } + } + } + return false +} + +// Allocate is the call that you can use to allocate a set of resources +// from the registered resource plugins. 
+func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error { + if pod == nil || container == nil { + return fmt.Errorf("Allocate got nil pod: %v or container: %v", pod, container) + } + + containerType, containerIndex, err := GetContainerTypeAndIndex(pod, container) + + if err != nil { + return fmt.Errorf("GetContainerTypeAndIndex failed with error: %v", err) + } + + if err = m.allocateContainerResources(pod, container, containerType, containerIndex, false); err != nil { + return err + } + return nil +} + +// ReAllocate is the call that you can use to re-allocate a set of resources during reconciling +func (m *ManagerImpl) reAllocate(pod *v1.Pod, container *v1.Container) error { + if pod == nil || container == nil { + return fmt.Errorf("Allocate got nil pod: %v or container: %v", pod, container) + } + + containerType, containerIndex, err := GetContainerTypeAndIndex(pod, container) + + if err != nil { + return fmt.Errorf("GetContainerTypeAndIndex failed with error: %v", err) + } + + if err = m.allocateContainerResources(pod, container, containerType, containerIndex, true); err != nil { + return err + } + return nil +} + +func (m *ManagerImpl) isContainerRequestResource(container *v1.Container, resourceName string) (bool, error) { + if container == nil { + return false, nil + } + + for k := range container.Resources.Requests { + requestedResourceName, err := m.getMappedResourceName(string(k), container.Resources.Requests) + + if err != nil { + return false, err + } + + if requestedResourceName == resourceName { + return true, nil + } + } + + return false, nil +} + +// allocateContainerResources attempts to allocate all of required resource +// plugin resources for the input container, issues an Allocate rpc request +// for each new resource resource requirement, processes their AllocateResponses, +// and updates the cached containerResources on success. 
func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, containerType pluginapi.ContainerType, containerIndex uint64, isReAllocation bool) error {
	if pod == nil || container == nil {
		return fmt.Errorf("allocateContainerResources met nil pod: %v or container: %v", pod, container)
	}

	if isSkippedPod(pod, !isReAllocation) {
		klog.Infof("[qosresourcemanager] skip pod: %s/%s, container: %s resource allocation with isReAllocation: %v",
			pod.Namespace, pod.Name, container.Name, isReAllocation)
		return nil
	}

	podUID := string(pod.UID)
	contName := container.Name
	allocatedResourcesUpdated := false
	// [TODO](sunjianyu): for accompanying resources, we may support request those resources in annotation later
	for k, v := range container.Resources.Requests {
		reqResource := string(k)
		needed := int(v.Value())

		// Map e.g. best-effort-cpu -> cpu; the mapped name selects the endpoint.
		resource, err := m.getMappedResourceName(reqResource, container.Resources.Requests)
		if err != nil {
			return fmt.Errorf("getMappedResourceName failed with error: %v", err)
		}

		if !m.isResourcePluginResource(resource) {
			klog.Infof("[qosresourcemanager] resource %s is not supported by any resource plugin", resource)
			continue
		}

		klog.Infof("[qosresourcemanager] pod: %s/%s container: %s needs %d %s",
			pod.Namespace, pod.Name, container.Name, needed, reqResource)

		// Updates allocated resources to garbage collect any stranded resources
		// before doing the resource plugin allocation.
		if !allocatedResourcesUpdated {
			m.UpdateAllocatedResources()
			allocatedResourcesUpdated = true
		}

		// short circuit to regenerate the same allocationInfo if there are already
		// allocated to the Container. This might happen after a
		// kubelet restart, for example.
		// think about a parent resource name with accompanying resources, we only check the result of the parent resource.
		// if you want to allocate for accompanying resources every times, you can set the parent resource as non-scalar resource or set allocated quantity as zero
		allocationInfo := m.podResources.containerResource(string(pod.UID), container.Name, resource)
		if allocationInfo != nil {

			allocated := int(math.Ceil(allocationInfo.AllocatedQuantity))

			if allocationInfo.IsScalarResource && allocated >= needed {
				klog.Infof("[qosresourcemanager] resource %s already allocated to (pod %s/%s, container %v) with larger number than request: requested: %d, allocated: %d; not to allocate",
					resource, pod.GetNamespace(), pod.GetName(), container.Name, needed, allocated)
				continue
			} else {
				klog.Warningf("[qosresourcemanager] resource %s already allocated to (pod %s/%s, container %v) with smaller number than request: requested: %d, allocated: %d; continue to allocate",
					resource, pod.GetNamespace(), pod.GetName(), container.Name, needed, allocated)
			}
		}

		startRPCTime := time.Now()
		// Manager.Allocate involves RPC calls to resource plugin, which
		// could be heavy-weight. Therefore we want to perform this operation outside
		// mutex lock. Note if Allocate call fails, we may leave container resources
		// partially allocated for the failed container. We rely on UpdateAllocatedResources()
		// to garbage collect these resources later. Another side effect is that if
		// we have X resource A and Y resource B in total, and two containers, container1
		// and container2 both require X resource A and Y resource B. Both allocation
		// requests may fail if we serve them in mixed order.
		// TODO: may revisit this part later if we see inefficient resource allocation
		// in real use as the result of this. Should also consider to parallelize resource
		// plugin Allocate grpc calls if it becomes common that a container may require
		// resources from multiple resource plugins.
		m.mutex.Lock()
		eI, ok := m.endpoints[resource]
		m.mutex.Unlock()
		if !ok {
			return fmt.Errorf("unknown Resource Plugin %s", resource)
		}

		// TODO: refactor this part of code to just append a ContainerAllocationRequest
		// in a passed in AllocateRequest pointer, and issues a single Allocate call per pod.
		klog.V(3).Infof("[qosresourcemanager] making allocation request of %.3f resources %s for pod: %s/%s; container: %s",
			ParseQuantityToFloat64(v), reqResource, pod.Namespace, pod.Name, container.Name)

		resourceReq := &pluginapi.ResourceRequest{
			PodUid:         string(pod.GetUID()),
			PodNamespace:   pod.GetNamespace(),
			PodName:        pod.GetName(),
			ContainerName:  container.Name,
			ContainerType:  containerType,
			ContainerIndex: containerIndex,
			// customize for tce, PodRole and PodType should be identified by more general annotations
			PodRole: pod.Labels[pluginapi.PodRoleLabelKey],
			PodType: pod.Annotations[pluginapi.PodTypeAnnotationKey],
			// use mapped resource name in "ResourceName" to indicates which endpoint to request
			ResourceName: resource,
			// use original requested resource name in "ResourceRequests" in order to make plugin identity real requested resource name
			ResourceRequests: map[string]float64{reqResource: ParseQuantityToFloat64(v)},
			Labels:           maputil.CopySS(pod.Labels),
			Annotations:      maputil.CopySS(pod.Annotations),
		}

		if m.resourceHasTopologyAlignment(resource) {
			hint := m.topologyAffinityStore.GetAffinity(podUID, contName)

			if hint.NUMANodeAffinity == nil {
				klog.Warningf("[qosresourcemanager] pod: %s/%s; container: %s allocate resouce: %s without numa nodes affinity",
					pod.Namespace, pod.Name, container.Name, resource)
			} else {
				klog.Warningf("[qosresourcemanager] pod: %s/%s; container: %s allocate resouce: %s get hint: %v from store",
					pod.Namespace, pod.Name, container.Name, resource, hint)
			}

			resourceReq.Hint = ParseTopologyManagerHint(hint)
		}

		resp, err := eI.e.allocate(context.Background(), resourceReq)
		metrics.ResourcePluginAllocationDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime))
		if err != nil {
			errMsg := fmt.Sprintf("allocate for resources %s for pod: %s/%s, container: %s got error: %v",
				resource, pod.Namespace, pod.Name, container.Name, err)
			klog.Errorf("[qosresourcemanager] %s", errMsg)

			// in case of endpoint not working, pass some types of pods don't necessarily require resource allocation.
			if canSkipEndpointError(pod, resource) {
				klog.Warningf("[qosresourcemanager] pod: %s/%s, container: %s skip %s endpoint allocation error",
					pod.Namespace, pod.Name, container.Name, resource)
				continue
			}

			// NOTE(review): non-constant format string (go vet); a '%' in errMsg
			// would be misinterpreted — prefer fmt.Errorf("%s", errMsg).
			return fmt.Errorf(errMsg)
		}

		// Update internal cached podResources state.
		if resp.AllocationResult == nil {
			klog.Warningf("[qosresourcemanager] allocation request for resources %s for pod: %s/%s, container: %s got nil allocation result",
				resource, pod.Namespace, pod.Name, container.Name)
			continue
		}

		// [TODO](sunjianyu): to think about a method to avoid accompanying resource names conflict
		for accResourceName, allocationInfo := range resp.AllocationResult.ResourceAllocation {
			if allocationInfo == nil {
				klog.Warningf("[qosresourcemanager] allocation request for resources %s - accompanying resource: %s for pod: %s/%s, container: %s got nil allocation infomation",
					resource, accResourceName, pod.Namespace, pod.Name, container.Name)
				continue
			}

			klog.V(4).Infof("[qosresourcemanager] allocation information for resources %s - accompanying resource: %s for pod: %s/%s, container: %s is %v",
				resource, accResourceName, pod.Namespace, pod.Name, container.Name, *allocationInfo)

			m.podResources.insert(podUID, contName, accResourceName, allocationInfo)

			allocatedScalarResourcesQuantity := m.podResources.scalarResourcesQuantity()

			m.mutex.Lock()
			m.allocatedScalarResourcesQuantity = allocatedScalarResourcesQuantity
			m.mutex.Unlock()
		}
	}

	// Checkpoints resource to container allocation information.
	return m.writeCheckpoint()
}

// UpdatePluginResources updates node resources based on resources already allocated to pods.
func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
	pod := attrs.Pod

	// quick return if no pluginResources requested
	if m.podResources.podResources(string(pod.UID)) == nil {
		return nil
	}

	m.sanitizeNodeAllocatable(node)
	return nil
}

// Register registers a resource plugin.
func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest) (*pluginapi.Empty, error) {
	klog.Infof("[qosresourcemanager] Got registration request from resource plugin with resource name %q", r.ResourceName)
	metrics.ResourcePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc()
	var versionCompatible bool
	for _, v := range pluginapi.SupportedVersions {
		if r.Version == v {
			versionCompatible = true
			break
		}
	}
	if !versionCompatible {
		errorString := fmt.Sprintf(errUnsupportedVersion, r.Version, pluginapi.SupportedVersions)
		klog.Infof("Bad registration request from resource plugin with resource name %q: %s", r.ResourceName, errorString)
		// NOTE(review): non-constant format string (go vet); prefer
		// fmt.Errorf("%s", errorString).
		return &pluginapi.Empty{}, fmt.Errorf(errorString)
	}

	// TODO: for now, always accepts newest resource plugin. Later may consider to
	// add some policies here, e.g., verify whether an old resource plugin with the
	// same resource name is still alive to determine whether we want to accept
	// the new registration.
	success := make(chan bool)
	go m.addEndpoint(r, success)
	select {
	case pass := <-success:
		if pass {
			klog.Infof("[qosresourcemanager] Register resource plugin for %s success", r.ResourceName)
			return &pluginapi.Empty{}, nil
		}
		klog.Errorf("[qosresourcemanager] Register resource plugin for %s fail", r.ResourceName)
		return &pluginapi.Empty{}, fmt.Errorf("failed to register resource %s", r.ResourceName)
	case <-ctx.Done():
		klog.Errorf("[qosresourcemanager] Register resource plugin for %s timeout", r.ResourceName)
		return &pluginapi.Empty{}, fmt.Errorf("timeout to register resource %s", r.ResourceName)
	}
}

// Stop is the function that can stop the gRPC server.
// Can be called concurrently, more than once, and is safe to call
// without a prior Start.
func (m *ManagerImpl) Stop() error {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	for _, eI := range m.endpoints {
		eI.e.stop()
	}

	if m.server == nil {
		return nil
	}
	m.server.Stop()
	m.wg.Wait()
	m.server = nil
	return nil
}

// registerEndpoint records (or replaces) the endpoint serving resourceName,
// stopping any still-running previous endpoint for the same resource.
func (m *ManagerImpl) registerEndpoint(resourceName string, options *pluginapi.ResourcePluginOptions, e endpoint) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	old, ok := m.endpoints[resourceName]

	if ok && !old.e.isStopped() {
		klog.V(2).Infof("[qosresourcemanager] stop old endpoint: %v", old.e)
		old.e.stop()
	}

	m.endpoints[resourceName] = endpointInfo{e: e, opts: options}
	klog.V(2).Infof("[qosresourcemanager] Registered endpoint %v", e)
}

// addEndpoint dials the plugin socket named in r and reports the outcome on
// the success channel (used by Register to bound registration time).
func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest, success chan<- bool) {
	// NOTE(review): "new" shadows the builtin; a different name would be clearer.
	new, err := newEndpointImpl(filepath.Join(m.socketdir, r.Endpoint), r.ResourceName)
	if err != nil {
		klog.Errorf("[qosresourcemanager] Failed to dial resource plugin with request %v: %v", r, err)
		success <- false
		return
	}
	m.registerEndpoint(r.ResourceName, r.Options, new)
	success <- true
}

// GetTopologyAwareResources merges, across all live endpoints, the
// topology-aware resources allocated to the given pod/container. Returns
// (nil, nil) for skipped pods or when no endpoint reports anything.
func (m *ManagerImpl) GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) (*pluginapi.GetTopologyAwareResourcesResponse, error) {
	var resp *pluginapi.GetTopologyAwareResourcesResponse

	if pod == nil || container == nil {
		return nil, fmt.Errorf("GetTopologyAwareResources got nil pod: %v or container: %v", pod, container)
	} else if isSkippedPod(pod, false) {
		klog.V(4).Infof("[qosresourcemanager] skip pod: %s/%s, container: %s GetTopologyAwareResources",
			pod.Namespace, pod.Name, container.Name)
		return nil, nil
	}

	podUID := string(pod.UID)
	containerName := string(container.Name)

	// NOTE(review): m.mutex is released inside this range over m.endpoints
	// while the RPC runs; a concurrent registerEndpoint may then write the map
	// during iteration, which is a fatal concurrent map access in Go. Consider
	// snapshotting the endpoints under the lock before iterating.
	m.mutex.Lock()
	for resourceName, eI := range m.endpoints {
		if eI.e.isStopped() {
			klog.Warningf("[qosresourcemanager] skip GetTopologyAwareResources of resource: %s for pod: %s container: %s, because plugin stopped",
				resourceName, podUID, containerName)
			continue
		}

		ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(nil))
		m.mutex.Unlock()
		curResp, err := eI.e.getTopologyAwareResources(ctx, &pluginapi.GetTopologyAwareResourcesRequest{
			PodUid:        podUID,
			ContainerName: containerName,
		})
		m.mutex.Lock()

		if err != nil {
			m.mutex.Unlock()
			//[TODO](sunjianyu): to discuss if we should return err if only one resource plugin gets error?
			return nil, fmt.Errorf("getTopologyAwareResources for resource: %s failed with error: %v", resourceName, err)
		} else if curResp == nil {
			klog.Warningf("[qosresourcemanager] getTopologyAwareResources of resource: %s for pod: %s container: %s, got nil response but without error",
				resourceName, podUID, containerName)
			continue
		}

		if resp == nil {
			// First non-nil response seeds the merged result; ensure its maps
			// exist so later endpoints can merge into it.
			resp = curResp

			if resp.ContainerTopologyAwareResources == nil {
				resp.ContainerTopologyAwareResources = &pluginapi.ContainerTopologyAwareResources{
					ContainerName: containerName,
				}
			}

			if resp.ContainerTopologyAwareResources.AllocatedResources == nil {
				resp.ContainerTopologyAwareResources.AllocatedResources = make(map[string]*pluginapi.TopologyAwareResource)
			}
		} else if curResp.ContainerTopologyAwareResources != nil && curResp.ContainerTopologyAwareResources.AllocatedResources != nil {
			for resourceName, topologyAwareResource := range curResp.ContainerTopologyAwareResources.AllocatedResources {
				if topologyAwareResource != nil {
					// Clone so the merged response does not alias plugin-owned protos.
					resp.ContainerTopologyAwareResources.AllocatedResources[resourceName] = proto.Clone(topologyAwareResource).(*pluginapi.TopologyAwareResource)
				}
			}
		} else {
			klog.Warningf("[qosresourcemanager] getTopologyAwareResources of resource: %s for pod: %s container: %s, get nil resp or nil topologyAwareResources in resp",
				resourceName, podUID, containerName)
		}
	}
	m.mutex.Unlock()
	return resp, nil
}

// GetTopologyAwareAllocatableResources merges, across all live endpoints, the
// allocatable topology-aware resources they report.
func (m *ManagerImpl) GetTopologyAwareAllocatableResources() (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) {
	var resp *pluginapi.GetTopologyAwareAllocatableResourcesResponse

	// NOTE(review): same unlock-inside-range hazard as GetTopologyAwareResources.
	m.mutex.Lock()
	for resourceName, eI := range m.endpoints {
		if eI.e.isStopped() {
			klog.Warningf("[qosresourcemanager] skip GetTopologyAwareAllocatableResources of resource: %s, because plugin stopped", resourceName)
			continue
		}
		ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(nil))
		m.mutex.Unlock()
		curResp, err := eI.e.getTopologyAwareAllocatableResources(ctx, &pluginapi.GetTopologyAwareAllocatableResourcesRequest{})
		m.mutex.Lock()

		if err != nil {
			m.mutex.Unlock()
			//[TODO](sunjianyu): to discuss if we should return err if only one resource plugin gets error?
			return nil, fmt.Errorf("getTopologyAwareAllocatableResources for resource: %s failed with error: %v", resourceName, err)
		} else if curResp == nil {
			klog.Warningln("[qosresourcemanager] getTopologyAwareAllocatableResources got nil response but without error")
			continue
		}

		if resp == nil {
			resp = curResp

			if resp.AllocatableResources == nil {
				resp.AllocatableResources = make(map[string]*pluginapi.AllocatableTopologyAwareResource)
			}
		} else if curResp.AllocatableResources != nil {
			for resourceName, topologyAwareResource := range curResp.AllocatableResources {
				if topologyAwareResource != nil {
					resp.AllocatableResources[resourceName] = proto.Clone(topologyAwareResource).(*pluginapi.AllocatableTopologyAwareResource)
				}
			}
		} else {
			klog.Warningf("[qosresourcemanager] getTopologyAwareAllocatableResources of resource: %s, get nil resp or nil topologyAwareResources in resp", resourceName)
		}
	}
	m.mutex.Unlock()
	return resp, nil
}

// GetCapacity is expected to be called when Kubelet updates its node status.
// The first returned variable contains the registered resource plugin resource capacity.
// The second returned variable contains the registered resource plugin resource allocatable.
// The third returned variable contains previously registered resources that are no longer active.
// Kubelet uses this information to update resource capacity/allocatable in its node status.
// After the call, resource plugin can remove the inactive resources from its internal list as the
// change is already reflected in Kubelet node status.
+// Note in the special case after Kubelet restarts, resource plugin resource capacities can +// temporarily drop to zero till corresponding resource plugins re-register. This is OK because +// cm.UpdatePluginResource() run during predicate Admit guarantees we adjust nodeinfo +// capacity for already allocated pods so that they can continue to run. However, new pods +// requiring resource plugin resources will not be scheduled till resource plugin re-registers. +func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) { + var capacity = v1.ResourceList{} + var allocatable = v1.ResourceList{} + deletedResources := sets.NewString() + m.mutex.Lock() + // [TODO](sunjianyu): consider we need diff capacity and allocatable here? + for resourceName, eI := range m.endpoints { + implicitIsNodeResource := m.isNodeResource(resourceName) + + if eI.e.stopGracePeriodExpired() { + if !implicitIsNodeResource { + klog.Infof("[qosresourcemanager] skip GetCapacity for resource: %s with implicitIsNodeResource: %v", resourceName, implicitIsNodeResource) + continue + } + delete(m.endpoints, resourceName) + deletedResources.Insert(resourceName) + } else { + ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(nil)) + m.mutex.Unlock() + resp, err := eI.e.getTopologyAwareAllocatableResources(ctx, &pluginapi.GetTopologyAwareAllocatableResourcesRequest{}) + m.mutex.Lock() + if err != nil { + klog.Errorf("[qosresourcemanager] getTopologyAwareAllocatableResources for resource: %s failed with error: %v", resourceName, err) + if !implicitIsNodeResource { + klog.Infof("[qosresourcemanager] skip GetCapacity for resource: %s with implicitIsNodeResource: %v", resourceName, implicitIsNodeResource) + continue + } + // [TODO](sunjianyu): consider if it will cause resource quantity vibrating? 
+ capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(0, resource.DecimalSI) + allocatable[v1.ResourceName(resourceName)] = *resource.NewQuantity(0, resource.DecimalSI) + } else if resp == nil || + resp.AllocatableResources == nil || + len(resp.AllocatableResources) == 0 { + klog.Warningf("[qosresourcemanager] getTopologyAwareAllocatableResources for resource: %s got nil response or empty content in response", resourceName) + if !implicitIsNodeResource { + klog.Infof("[qosresourcemanager] skip GetCapacity for resource: %s with implicitIsNodeResource: %v", resourceName, implicitIsNodeResource) + continue + } + capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(0, resource.DecimalSI) + allocatable[v1.ResourceName(resourceName)] = *resource.NewQuantity(0, resource.DecimalSI) + } else { + for accResourceName, taResource := range resp.AllocatableResources { + + if taResource == nil { + klog.Errorf("[qosresourcemanager] accResourceName: %s with nil topology aware resource", accResourceName) + capacity[v1.ResourceName(accResourceName)] = *resource.NewQuantity(0, resource.DecimalSI) + allocatable[v1.ResourceName(accResourceName)] = *resource.NewQuantity(0, resource.DecimalSI) + continue + } + + if taResource.IsNodeResource && taResource.IsScalarResource { + aggregatedAllocatableQuantity, _ := resource.ParseQuantity(fmt.Sprintf("%.3f", taResource.AggregatedAllocatableQuantity)) + aggregatedCapacityQuantity, _ := resource.ParseQuantity(fmt.Sprintf("%.3f", taResource.AggregatedCapacityQuantity)) + allocatable[v1.ResourceName(accResourceName)] = aggregatedAllocatableQuantity + capacity[v1.ResourceName(accResourceName)] = aggregatedCapacityQuantity + } + } + } + } + } + m.mutex.Unlock() + return capacity, allocatable, deletedResources.UnsortedList() +} + +// Checkpoints resource to container allocation information to disk. 
+func (m *ManagerImpl) writeCheckpoint() error { + data := checkpoint.New(m.podResources.toCheckpointData()) + err := m.checkpointManager.CreateCheckpoint(kubeletQoSResourceManagerCheckpoint, data) + if err != nil { + err2 := fmt.Errorf("[qosresourcemanager] failed to write checkpoint file %q: %v", kubeletQoSResourceManagerCheckpoint, err) + klog.Warning(err2) + return err2 + } + return nil +} + +// Reads resource to container allocation information from disk, and populates +// m.allocatedScalarResourcesQuantity accordingly. +func (m *ManagerImpl) readCheckpoint() error { + resEntries := make([]checkpoint.PodResourcesEntry, 0) + cp := checkpoint.New(resEntries) + err := m.checkpointManager.GetCheckpoint(kubeletQoSResourceManagerCheckpoint, cp) + if err != nil { + if err == errors.ErrCheckpointNotFound { + klog.Warningf("[qosresourcemanager] Failed to retrieve checkpoint for %q: %v", kubeletQoSResourceManagerCheckpoint, err) + return nil + } + return err + } + + podResources := cp.GetData() + m.podResources.fromCheckpointData(podResources) + allocatedScalarResourcesQuantity := m.podResources.scalarResourcesQuantity() + + m.mutex.Lock() + m.allocatedScalarResourcesQuantity = allocatedScalarResourcesQuantity + + allocatedResourceNames := m.podResources.allAllocatedResourceNames() + + for _, allocatedResourceName := range allocatedResourceNames.UnsortedList() { + m.endpoints[allocatedResourceName] = endpointInfo{e: newStoppedEndpointImpl(allocatedResourceName), opts: nil} + } + + m.mutex.Unlock() + + return nil +} + +// UpdateAllocatedResources frees any Resources that are bound to terminated pods. 
+func (m *ManagerImpl) UpdateAllocatedResources() { + activePods := m.activePods() + if !m.sourcesReady.AllReady() { + return + } + podsToBeRemoved := m.podResources.pods() + for _, pod := range activePods { + podsToBeRemoved.Delete(string(pod.UID)) + } + if len(podsToBeRemoved) <= 0 { + return + } + + podsToBeRemovedList := podsToBeRemoved.UnsortedList() + klog.V(3).Infof("[qosresourcemanager] pods to be removed: %v", podsToBeRemovedList) + + m.mutex.Lock() + for _, podUID := range podsToBeRemovedList { + + allSuccess := true + for resourceName, eI := range m.endpoints { + if eI.e.isStopped() { + klog.Warningf("[qosresourcemanager] skip removePods: %+v of resource: %s, because plugin stopped", podsToBeRemovedList, resourceName) + continue + } + + ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(nil)) + m.mutex.Unlock() + _, err := eI.e.removePod(ctx, &pluginapi.RemovePodRequest{ + PodUid: podUID, + }) + m.mutex.Lock() + + if err != nil { + allSuccess = false + klog.Errorf("[qosresourcemanager.UpdateAllocatedResources] remove pod: %s in %s endpoint failed with error: %v", podUID, resourceName, err) + } + } + + if allSuccess { + m.podResources.deletePod(podUID) + } else { + klog.Warningf("[qosresourcemanager.UpdateAllocatedResources] pod: %s should be deleted, but it's not removed in all plugins, so keep it temporarily", podUID) + } + } + m.mutex.Unlock() + + err := m.writeCheckpoint() + + if err != nil { + klog.Errorf("[qosresourcemanager.UpdateAllocatedResources] write checkpoint failed with error: %v", err) + } + + // Regenerated allocatedScalarResourcesQuantity after we update pod allocation information. 
+ allocatedScalarResourcesQuantity := m.podResources.scalarResourcesQuantity() + m.mutex.Lock() + m.allocatedScalarResourcesQuantity = allocatedScalarResourcesQuantity + m.mutex.Unlock() +} + +// getMappedResourceName returns mapped resource name of input "resourceName" in m.resourceNamesMap if there is the mapping entry, +// or it will return input "resourceName". +// If both the input "resourceName" and the mapped resource name are requested, it will return error. +func (m *ManagerImpl) getMappedResourceName(resourceName string, requests v1.ResourceList) (string, error) { + if _, found := m.resourceNamesMap[resourceName]; !found { + return resourceName, nil + } + + mappedResourceName := m.resourceNamesMap[resourceName] + + _, foundReq := requests[v1.ResourceName(resourceName)] + _, foundMappedReq := requests[v1.ResourceName(mappedResourceName)] + + if foundReq && foundMappedReq { + return mappedResourceName, fmt.Errorf("both %s and mapped %s are requested", resourceName, mappedResourceName) + } + + klog.Infof("[qosresourcemanager.getMappedResourceName] map resource name: %s to %s", resourceName, mappedResourceName) + + return mappedResourceName, nil +} + +// GetResourceRunContainerOptions checks whether we have cached containerResources +// for the passed-in and returns its ResourceRunContainerOptions +// for the found one. An empty struct is returned in case no cached state is found. 
+func (m *ManagerImpl) GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) {
+	if pod == nil || container == nil {
+		return nil, fmt.Errorf("GetResourceRunContainerOptions got nil pod: %v or container: %v", pod, container)
+	} else if isSkippedPod(pod, true) {
+		klog.V(4).Infof("[qosresourcemanager] skip pod: %s/%s, container: %s resource allocation",
+			pod.Namespace, pod.Name, container.Name)
+		return nil, nil
+	}
+
+	podUID := string(pod.UID)
+	contName := container.Name
+
+	resources := m.podResources.containerAllResources(podUID, contName)
+
+	// [TODO](sunjianyu): for accompanying resources, we may support request those resources in annotation later
+	// think about a parent resource name with accompanying resources,
+	// we must return the result of the parent resource to avoid reallocating.
+	needsReAllocate := false
+	for k, v := range container.Resources.Requests {
+		resourceName := string(k)
+
+		resourceName, err := m.getMappedResourceName(resourceName, container.Resources.Requests)
+
+		if err != nil {
+			return nil, fmt.Errorf("getMappedResourceName failed with error: %v", err)
+		}
+
+		if !m.isResourcePluginResource(resourceName) {
+			continue
+		}
+		if v.Value() == 0 {
+			continue
+		}
+		err = m.callPreStartContainerIfNeeded(pod, container, resourceName)
+		if err != nil {
+			return nil, err
+		}
+
+		// This is a resource plugin resource yet we don't have cached
+		// resource state. This is likely due to a race during node
+		// restart. We re-issue allocate request to cover this race.
+ if resources[resourceName] == nil { + needsReAllocate = true + } + } + if needsReAllocate && !isSkippedContainer(pod, container) { + klog.V(2).Infof("[qosresourcemanager] needs re-allocate resource plugin resources for pod %s, container %s during GetResourceRunContainerOptions", podUID, container.Name) + if err := m.reAllocate(pod, container); err != nil { + return nil, err + } + } + + return m.podResources.resourceRunContainerOptions(string(pod.UID), container.Name) +} + +// callPreStartContainerIfNeeded issues PreStartContainer grpc call for resource plugin resource +// with PreStartRequired option set. +func (m *ManagerImpl) callPreStartContainerIfNeeded(pod *v1.Pod, container *v1.Container, resource string) error { + m.mutex.Lock() + eI, ok := m.endpoints[resource] + if !ok { + m.mutex.Unlock() + return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource) + } + + if eI.opts == nil || !eI.opts.PreStartRequired { + m.mutex.Unlock() + klog.V(4).Infof("[qosresourcemanager] resource plugin options indicate to skip PreStartContainer for resource: %s", resource) + return nil + } + + m.mutex.Unlock() + klog.V(4).Infof("[qosresourcemanager] Issuing an PreStartContainer call for container, %s, of pod %s", container.Name, pod.Name) + _, err := eI.e.preStartContainer(pod, container) + if err != nil { + return fmt.Errorf("resource plugin PreStartContainer rpc failed with err: %v", err) + } + // TODO: Add metrics support for init RPC + return nil +} + +// sanitizeNodeAllocatable scans through allocatedScalarResourcesQuantity in the qos resource manager +// and if necessary, updates allocatableResource in nodeInfo to at least equal to +// the allocated capacity. This allows pods that have already been scheduled on +// the node to pass GeneralPredicates admission checking even upon resource plugin failure. 
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo) {
+
+	var newAllocatableResource *schedulerframework.Resource
+	allocatableResource := node.Allocatable
+	if allocatableResource.ScalarResources == nil {
+		allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
+	}
+
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	for resource, allocatedQuantity := range m.allocatedScalarResourcesQuantity {
+		quant, ok := allocatableResource.ScalarResources[v1.ResourceName(resource)]
+		if ok && float64(quant) >= allocatedQuantity {
+			continue
+		}
+		// Needs to update nodeInfo.AllocatableResource to make sure
+		// NodeInfo.allocatableResource at least equal to the capacity already allocated.
+		if newAllocatableResource == nil {
+			newAllocatableResource = allocatableResource.Clone()
+		}
+		newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(math.Ceil(allocatedQuantity))
+	}
+	if newAllocatableResource != nil {
+		node.Allocatable = newAllocatableResource
+	}
+}
+
+func (m *ManagerImpl) isResourcePluginResource(resource string) bool {
+	m.mutex.Lock()
+	_, registeredResource := m.endpoints[resource]
+	m.mutex.Unlock()
+
+	if registeredResource {
+		return true
+	}
+
+	allocatedResourceNames := m.podResources.allAllocatedResourceNames()
+	return allocatedResourceNames.Has(resource)
+}
+
+// ShouldResetExtendedResourceCapacity returns whether the extended resources should be zeroed or not,
+// depending on whether the node has been recreated. Absence of the checkpoint file strongly indicates the node
+// has been recreated.
+// since QRM isn't responsible for extended resources now, we just return false directly.
+// for the future, we should think about identifying resource names from QRM and device manager and resetting them respectively.
+func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool { + //if utilfeature.DefaultFeatureGate.Enabled(features.QoSResourceManager) { + // checkpoints, err := m.checkpointManager.ListCheckpoints() + // if err != nil { + // return false + // } + // return len(checkpoints) == 0 + //} + return false +} + +func (m *ManagerImpl) reconcileState() { + klog.Infof("[qosresourcemanager.reconcileState] reconciling") + + m.UpdateAllocatedResources() + + activePods := m.activePods() + + resourceAllocationResps := make(map[string]*pluginapi.GetResourcesAllocationResponse) + + m.mutex.Lock() + + for resourceName, eI := range m.endpoints { + if eI.e.isStopped() { + klog.Warningf("[qosresourcemanager.reconcileState] skip getResourceAllocation of resource: %s, because plugin stopped", resourceName) + continue + } else if !eI.opts.NeedReconcile { + klog.V(6).Infof("[qosresourcemanager.reconcileState] skip getResourceAllocation of resource: %s, because plugin needn't reconciling", resourceName) + continue + } + + ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(nil)) + m.mutex.Unlock() + resp, err := eI.e.getResourceAllocation(ctx, &pluginapi.GetResourcesAllocationRequest{}) + m.mutex.Lock() + + if err != nil { + klog.Errorf("[qosresourcemanager.reconcileState] getResourceAllocation to %s endpoint failed with error: %v", resourceName, err) + continue + } + + resourceAllocationResps[resourceName] = resp + } + m.mutex.Unlock() + + for _, pod := range activePods { + if pod == nil { + continue + } else if isSkippedPod(pod, false) { + klog.V(4).Infof("[qosresourcemanager] skip active pod: %s/%s reconcile", pod.Namespace, pod.Name) + continue + } + + pstatus, ok := m.podStatusProvider.GetPodStatus(pod.UID) + if !ok { + klog.Warningf("[qosresourcemanager.reconcileState] reconcileState: skipping pod; status not found (pod: %s/%s)", pod.Namespace, pod.Name) + continue + } + + nContainers := len(pod.Spec.Containers) + containersLoop: + for i := 0; i < 
nContainers; i++ { + podUID := string(pod.UID) + containerName := pod.Spec.Containers[i].Name + + containerID, err := findContainerIDByName(&pstatus, containerName) + if err != nil { + klog.Warningf("[qosresourcemanager.reconcileState] reconcileState: skipping container; ID not found in pod status (pod: %s/%s, container: %s, error: %v)", + pod.Namespace, pod.Name, containerName, err) + continue + } + + needsReAllocate := false + for resourceName, resp := range resourceAllocationResps { + if resp == nil { + klog.Warningf("[qosresourcemanager.reconcileState] resource: %s got nil resourceAllocationResp", resourceName) + continue + } + + isRequested, err := m.isContainerRequestResource(&pod.Spec.Containers[i], resourceName) + + if err != nil { + klog.Errorf("[qosresourcemanager.reconcileState] isContainerRequestResource failed with error: %v", err) + continue containersLoop + } + + if isRequested { + if resp.PodResources[podUID] != nil && resp.PodResources[podUID].ContainerResources[containerName] != nil { + resourceAllocations := resp.PodResources[podUID].ContainerResources[containerName] + for resourceName, resourceAllocationInfo := range resourceAllocations.ResourceAllocation { + m.podResources.insert(podUID, containerName, resourceName, resourceAllocationInfo) + } + } else { + // container requests the resource, but the corresponding endpoint hasn't record for the container + needsReAllocate = true + // delete current resource allocation for the container to avoid influencing re-allocation + m.podResources.deleteResourceAllocationInfo(podUID, containerName, resourceName) + } + } + } + + if needsReAllocate && !isSkippedContainer(pod, &pod.Spec.Containers[i]) { + klog.Infof("[qosresourcemanager] needs re-allocate resource plugin resources for pod %s/%s, container %s during reconcileState", + pod.Namespace, pod.Name, containerName) + if err := m.reAllocate(pod, &pod.Spec.Containers[i]); err != nil { + klog.Errorf("[qosresourcemanager] re-allocate resource plugin 
resources for pod %s/%s, container %s during reconcileState failed with error: %v", + pod.Namespace, pod.Name, containerName, err) + continue + } + } + + err = m.updateContainerResources(podUID, containerName, containerID) + if err != nil { + klog.Errorf("[qosresourcemanager.reconcileState] pod: %s/%s, container: %s, updateContainerResources failed with error: %v", + pod.Namespace, pod.Name, containerName, err) + continue + } else { + klog.Infof("[qosresourcemanager.reconcileState] pod: %s/%s, container: %s, reconcile state successfully", + pod.Namespace, pod.Name, containerName) + } + } + } + + // write checkpoint periodically in reconcileState function, to keep syncing podResources in memory to checkpoint file. + err := m.writeCheckpoint() + + if err != nil { + klog.Errorf("[qosresourcemanager.reconcileState] write checkpoint failed with error: %v", err) + } +} + +func (m *ManagerImpl) updateContainerResources(podUID, containerName, containerID string) error { + opts, err := m.podResources.resourceRunContainerOptions(podUID, containerName) + + if err != nil { + return fmt.Errorf("updateContainerResources failed with error: %v", err) + } else if opts == nil { + klog.Warningf("[qosresourcemanager.updateContainerResources] there is no resources opts for pod: %s, container: %s", + podUID, containerName) + return nil + } + + return m.containerRuntime.UpdateContainerResources( + containerID, + opts.Resources) +} + +func (m *ManagerImpl) isNodeResource(resourceName string) bool { + allocatedNodeResourceNames := m.podResources.allAllocatedNodeResourceNames() + allocatedResourceNames := m.podResources.allAllocatedResourceNames() + + if allocatedNodeResourceNames.Has(resourceName) { + return true + } else if allocatedResourceNames.Has(resourceName) { + return false + } + + // currently we think we only report quantity for scalar resource to node, + // if there is no allocation record declaring it as node resource explicitly. 
+ return schedutil.IsScalarResourceName(v1.ResourceName(resourceName)) +} diff --git a/pkg/kubelet/cm/qosresourcemanager/manager_stub.go b/pkg/kubelet/cm/qosresourcemanager/manager_stub.go new file mode 100644 index 0000000000000..11c66ab2f45f7 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/manager_stub.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package qosresourcemanager + +import ( + v1 "k8s.io/api/core/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" + "k8s.io/kubernetes/pkg/kubelet/status" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +// ManagerStub provides a simple stub implementation for the Resource Manager. +type ManagerStub struct{} + +// NewManagerStub creates a ManagerStub. +func NewManagerStub() (Manager, error) { + return &ManagerStub{}, nil +} + +// Start simply returns nil. +func (h *ManagerStub) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podstatusprovider status.PodStatusProvider, containerRuntime runtimeService) error { + return nil +} + +// Stop simply returns nil. +func (h *ManagerStub) Stop() error { + return nil +} + +// Allocate simply returns nil. 
+func (h *ManagerStub) Allocate(pod *v1.Pod, container *v1.Container) error { + return nil +} + +// UpdatePluginResources simply returns nil. +func (h *ManagerStub) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { + return nil +} + +// GetResourceRunContainerOptions simply returns nil, nil. +func (h *ManagerStub) GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) { + return nil, nil +} + +// GetCapacity simply returns nil capacity and empty removed resource list. +func (h *ManagerStub) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} +} + +// GetWatcherHandler returns plugin watcher interface +func (h *ManagerStub) GetWatcherHandler() cache.PluginHandler { + return nil +} + +// GetTopologyHints returns an empty TopologyHint map +func (h *ManagerStub) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { + return map[string][]topologymanager.TopologyHint{} +} + +// GetPodTopologyHints returns an empty TopologyHint map +func (h *ManagerStub) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { + return map[string][]topologymanager.TopologyHint{} +} + +// ShouldResetExtendedResourceCapacity returns false +func (h *ManagerStub) ShouldResetExtendedResourceCapacity() bool { + return false +} + +// GetTopologyAwareResources returns nil, nil +func (h *ManagerStub) GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) (*pluginapi.GetTopologyAwareResourcesResponse, error) { + return nil, nil +} + +// GetTopologyAwareAllocatableResources returns nil, nil +func (h *ManagerStub) GetTopologyAwareAllocatableResources() (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) { + return nil, nil +} + +// UpdateAllocatedResources frees any resources that are bound to terminated pods. 
+func (h *ManagerStub) UpdateAllocatedResources() { +} diff --git a/pkg/kubelet/cm/qosresourcemanager/manager_test.go b/pkg/kubelet/cm/qosresourcemanager/manager_test.go new file mode 100644 index 0000000000000..f6bb9a8c1f5a8 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/manager_test.go @@ -0,0 +1,1363 @@ +package qosresourcemanager + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/require" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/tools/record" + apitest "k8s.io/cri-api/pkg/apis/testing" + "k8s.io/klog/v2" + watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" + "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/pluginmanager" + "k8s.io/kubernetes/pkg/kubelet/status" + statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +const ( + testResourceName = "mock_res" + PreStartAnnotationKey = "prestartCalled" + PreStartAnnotationValue = "true" +) + +func tmpSocketDir() (socketDir, socketName, pluginSocketName string, err error) { + socketDir, err = ioutil.TempDir("", "qrm") + if err != nil { + return + } + socketName = path.Join(socketDir, "server.sock") + pluginSocketName = path.Join(socketDir, "qrm-plugin.sock") + _ = os.MkdirAll(socketDir, 0755) + return +} + +func TestNewManagerImpl(t *testing.T) { + socketDir, socketName, _, err := tmpSocketDir() + 
topologyStore := topologymanager.NewFakeManager() + require.NoError(t, err) + defer os.RemoveAll(socketDir) + + _, err = newManagerImpl(socketName, topologyStore, time.Second, nil) + require.NoError(t, err) +} + +func TestNewManagerImplStart(t *testing.T) { + socketDir, socketName, pluginSocketName, err := tmpSocketDir() + require.NoError(t, err) + defer os.RemoveAll(socketDir) + + m, p := setup(t, socketName, pluginSocketName) + + // ensures register successful after start server and plugin + err = p.Register(socketName, testResourceName, socketDir) + require.Nil(t, err) + _, exists := m.endpoints[testResourceName] + require.True(t, exists) + + cleanup(t, m, p, nil) + // Stop should tolerate being called more than once + cleanup(t, m, p, nil) +} + +func TestNewManagerImplStartProbeMode(t *testing.T) { + socketDir, socketName, pluginSocketName, err := tmpSocketDir() + require.NoError(t, err) + defer os.RemoveAll(socketDir) + + m, p, _, stopCh := setupInProbeMode(t, socketName, pluginSocketName) + // make plugin register to QRM automatically by plugin manager + time.Sleep(time.Second) + _, exists := m.endpoints[testResourceName] + require.True(t, exists) + cleanup(t, m, p, stopCh) +} + +type MockEndpoint struct { + allocateFunc func(resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) + topologyAllocatable func(ctx context.Context, request *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) + topologyAllocated func(ctx context.Context, request *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) + topologyHints func(ctx context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceHintsResponse, error) + resourceAlloc func(ctx context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) + initChan chan []string + stopTime time.Time +} + 
+func (m *MockEndpoint) stop() { + m.stopTime = time.Now() +} +func (m *MockEndpoint) run(success chan<- bool) {} + +func (m *MockEndpoint) preStartContainer(pod *v1.Pod, container *v1.Container) (*pluginapi.PreStartContainerResponse, error) { + //m.initChan <- devs + if pod == nil || container == nil { + return nil, fmt.Errorf("preStartContainer met nil pod or container") + } + + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + + pod.Annotations[PreStartAnnotationKey] = PreStartAnnotationValue + + return &pluginapi.PreStartContainerResponse{}, nil +} + +func (m *MockEndpoint) allocate(ctx context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) { + if m.isStopped() { + return nil, fmt.Errorf(errEndpointStopped, m) + } + if m.allocateFunc != nil { + return m.allocateFunc(resourceRequest) + } + return nil, nil +} + +func (m *MockEndpoint) isStopped() bool { + return !m.stopTime.IsZero() +} + +var SGP int = 0 + +func (m *MockEndpoint) stopGracePeriodExpired() bool { + if SGP == 0 { + return false + } else { + return true + } +} + +func (m *MockEndpoint) getTopologyHints(ctx context.Context, request *pluginapi.ResourceRequest) (*pluginapi.ResourceHintsResponse, error) { + if m.isStopped() { + return nil, fmt.Errorf("plugin stopped") + } + if m.topologyHints != nil { + return m.topologyHints(ctx, request) + } + return nil, nil +} + +func (m *MockEndpoint) getTopologyAwareAllocatableResources(ctx context.Context, request *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) { + if m.isStopped() { + return nil, fmt.Errorf("plugin stopped") + } + if m.topologyAllocatable != nil { + return m.topologyAllocatable(ctx, request) + } + return nil, nil +} + +func (m *MockEndpoint) getTopologyAwareResources(ctx context.Context, request *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) { 
+ if m.isStopped() { + return nil, fmt.Errorf("plugin stopped") + } + if m.topologyAllocated != nil { + return m.topologyAllocated(ctx, request) + } + return nil, nil +} + +func (m *MockEndpoint) removePod(ctx context.Context, removePodRequest *pluginapi.RemovePodRequest) (*pluginapi.RemovePodResponse, error) { + return nil, nil +} + +func (m *MockEndpoint) getResourceAllocation(ctx context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) { + if m.resourceAlloc != nil { + return m.resourceAlloc(ctx, request) + } + return nil, nil +} + +func makePod(name string, rl v1.ResourceList) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: uuid.NewUUID(), + Name: name, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: name, + Resources: v1.ResourceRequirements{ + Requests: rl.DeepCopy(), + Limits: rl.DeepCopy(), + }, + }, + }, + }, + } +} + +type TestResource struct { + resourceName string + resourceQuantity resource.Quantity +} + +type activePodsStub struct { + activePods []*v1.Pod +} + +func (a *activePodsStub) getActivePods() []*v1.Pod { + return a.activePods +} + +func (a *activePodsStub) updateActivePods(newPods []*v1.Pod) { + a.activePods = newPods +} + +func TestManagerAllocate(t *testing.T) { + res1 := TestResource{ + resourceName: "domain1.com/resource1", + resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), + } + res2 := TestResource{ + resourceName: "domain2.com/resource2", + resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), + } + res3 := TestResource{ + resourceName: "domain3.com/resource3", + resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), + } + + testResources := []TestResource{ + res1, + res2, + res3, + } + + as := require.New(t) + + testPods := []*v1.Pod{ + makePod("Pod0", v1.ResourceList{ + v1.ResourceName(res1.resourceName): res1.resourceQuantity, + v1.ResourceName(res2.resourceName): 
res2.resourceQuantity}), + makePod("Pod1", v1.ResourceList{ + v1.ResourceName(res1.resourceName): res1.resourceQuantity}), + makePod("Pod2", v1.ResourceList{ + v1.ResourceName(res2.resourceName): res2.resourceQuantity}), + makePod("Pod3", v1.ResourceList{ + v1.ResourceName(res3.resourceName): res3.resourceQuantity}), + makePod("Pod4", v1.ResourceList{ + v1.ResourceName(res3.resourceName): res3.resourceQuantity}), + } + + podsStub := activePodsStub{ + activePods: testPods, + } + tmpDir, err := ioutil.TempDir("", "checkpoint") + as.Nil(err) + defer os.RemoveAll(tmpDir) + + testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources) + as.Nil(err) + + // test skipped pod + testPods[1].OwnerReferences = []metav1.OwnerReference{ + {Kind: DaemonsetKind}, + } + err = testManager.Allocate(testPods[1], &testPods[1].Spec.Containers[0]) + // runContainerOpts1 and err are both nil + runContainerOpts1, err := testManager.GetResourceRunContainerOptions(testPods[1], &testPods[1].Spec.Containers[0]) + as.Nil(err) + as.Nil(runContainerOpts1) + // resourceAllocation1 is nil + resourceAllocation1 := testManager.podResources.containerResource(string(testPods[1].UID), testPods[1].Spec.Containers[0].Name, res1.resourceName) + as.Nil(resourceAllocation1) + + // remove owner reference + testPods[1].OwnerReferences = nil + + // endpoint isStopped + e1 := testManager.endpoints[res1.resourceName].e + e1.stop() + setPodAnnotation(testPods[1], pluginapi.KatalystQoSLevelAnnotationKey, pluginapi.KatalystQoSLevelReclaimedCores) + err = testManager.Allocate(testPods[1], &testPods[1].Spec.Containers[0]) + as.NotNil(err) + klog.Errorf("stopped endpoint allocation error: %v", err) + + // resume stopped endpoint + registerEndpointByRes(testManager, testResources) + + // container resource allocated >= need + err = testManager.Allocate(testPods[0], &testPods[0].Spec.Containers[0]) + as.Nil(err) + // res1 and res2 in resourceAllocation0 are both not nil and with correct values + 
resourceAllocation0 := testManager.podResources.containerAllResources(string(testPods[0].UID), testPods[0].Spec.Containers[0].Name) + as.NotNil(resourceAllocation0) + as.NotNil(resourceAllocation0[res1.resourceName]) + as.NotNil(resourceAllocation0[res2.resourceName]) + as.Equal(float64(res1.resourceQuantity.Value()), resourceAllocation0[res1.resourceName].AllocatedQuantity) + as.Equal(float64(res2.resourceQuantity.Value()), resourceAllocation0[res2.resourceName].AllocatedQuantity) + // res1 req 2 -> 1 + testPods[0].Spec.Containers[0].Resources.Requests[v1.ResourceName(res1.resourceName)] = *resource.NewQuantity(int64(1), resource.DecimalSI) + err = testManager.Allocate(testPods[0], &testPods[0].Spec.Containers[0]) + as.Nil(err) + res1Allocation := testManager.podResources.containerResource(string(testPods[0].UID), testPods[0].Spec.Containers[0].Name, res1.resourceName) + as.NotNil(res1Allocation) + // result is still 2 + as.Equal(float64(res1.resourceQuantity.Value()), res1Allocation.AllocatedQuantity) + + // container resource allocated < need + err = testManager.Allocate(testPods[2], &testPods[2].Spec.Containers[0]) + as.Nil(err) + resourceAllocation2 := testManager.podResources.containerResource(string(testPods[2].UID), testPods[2].Spec.Containers[0].Name, res2.resourceName) + as.Equal(float64(res2.resourceQuantity.Value()), resourceAllocation2.AllocatedQuantity) + // res2 req 2 -> 3 + testPods[2].Spec.Containers[0].Resources.Requests[v1.ResourceName(res2.resourceName)] = *resource.NewQuantity(int64(3), resource.DecimalSI) + err = testManager.Allocate(testPods[2], &testPods[2].Spec.Containers[0]) + as.Nil(err) + resourceAllocation2 = testManager.podResources.containerResource(string(testPods[2].UID), testPods[2].Spec.Containers[0].Name, res2.resourceName) + as.Equal(float64(3), resourceAllocation2.AllocatedQuantity) + + // test skip endpoint error + err = testManager.Allocate(testPods[3], &testPods[3].Spec.Containers[0]) + as.Nil(err) + + // test not skipping 
endpoint error
+	setPodAnnotation(testPods[4], pluginapi.KatalystQoSLevelAnnotationKey, pluginapi.KatalystQoSLevelReclaimedCores)
+	err = testManager.Allocate(testPods[4], &testPods[4].Spec.Containers[0])
+	as.NotNil(err)
+}
+
+func setPodAnnotation(pod *v1.Pod, key, value string) {
+	if pod.Annotations == nil {
+		pod.Annotations = make(map[string]string)
+	}
+
+	pod.Annotations[key] = value
+}
+
+func setPodLabel(pod *v1.Pod, key, value string) {
+	if pod.Labels == nil {
+		pod.Labels = make(map[string]string)
+	}
+
+	pod.Labels[key] = value
+}
+
+func TestReconcileState(t *testing.T) {
+	as := require.New(t)
+
+	res1 := TestResource{
+		resourceName:     "domain1.com/resource1",
+		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
+	}
+	testResources := []TestResource{
+		res1,
+	}
+
+	testPods := []*v1.Pod{
+		makePod("Pod0", v1.ResourceList{
+			v1.ResourceName(res1.resourceName): res1.resourceQuantity}),
+	}
+	podsStub := activePodsStub{
+		activePods: testPods,
+	}
+
+	tmpDir, err := ioutil.TempDir("", "checkpoint")
+	as.Nil(err)
+	defer os.RemoveAll(tmpDir)
+	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
+	as.Nil(err)
+	err = testManager.Allocate(testPods[0], &testPods[0].Spec.Containers[0])
+	as.Nil(err)
+	resp := testManager.podResources.containerResource(string(testPods[0].UID), testPods[0].Spec.Containers[0].Name, res1.resourceName)
+	as.NotNil(resp)
+	as.Equal(float64(res1.resourceQuantity.Value()), resp.AllocatedQuantity)
+
+	rs := &apitest.FakeRuntimeService{}
+	mockStatus := getMockStatusWithPods(t, testPods)
+	testManager.containerRuntime = rs
+	testManager.podStatusProvider = mockStatus
+
+	// override resourceAlloc to reply to reconcile
+	testManager.registerEndpoint(res1.resourceName, &pluginapi.ResourcePluginOptions{
+		NeedReconcile: true,
+	}, &MockEndpoint{
+		resourceAlloc: func(ctx context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse,
error) { + resp := new(pluginapi.GetResourcesAllocationResponse) + resp.PodResources = make(map[string]*pluginapi.ContainerResources) + resp.PodResources[string(testPods[0].UID)] = new(pluginapi.ContainerResources) + resp.PodResources[string(testPods[0].UID)].ContainerResources = make(map[string]*pluginapi.ResourceAllocation) + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name] = new(pluginapi.ResourceAllocation) + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation = make(map[string]*pluginapi.ResourceAllocationInfo) + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName] = new(pluginapi.ResourceAllocationInfo) + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].IsNodeResource = true + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].IsScalarResource = true + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].AllocatedQuantity = 1 + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].Envs = make(map[string]string) + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].Envs["kk1"] = "vv1" + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].Annotations = make(map[string]string) + resp.PodResources[string(testPods[0].UID)].ContainerResources[testPods[0].Spec.Containers[0].Name].ResourceAllocation[res1.resourceName].Annotations["A1"] = "AA1" + return resp, nil + }, + 
	// NOTE(review): the two lines below are the tail of the reconcile test that
	// begins above this hunk — they close the second stub-endpoint registration.
		allocateFunc: allocateStubFunc(),
	})

	testManager.reconcileState()

	// ensures containerResource matches with reconcile result and runtime UpdateContainerResources must be called
	resp = testManager.podResources.containerResource(string(testPods[0].UID), testPods[0].Spec.Containers[0].Name, res1.resourceName)
	as.NotNil(resp)
	as.Equal(float64(1), resp.AllocatedQuantity)
	err = rs.AssertCalls([]string{"UpdateContainerResources"})
	as.Nil(err)

	// ensures that reconcile does allocation for active pods without allocation results
	pod1 := makePod("Pod1", v1.ResourceList{v1.ResourceName(res1.resourceName): res1.resourceQuantity})
	testPods = append(testPods, pod1)
	podsStub.activePods = testPods
	mockStatus = getMockStatusWithPods(t, testPods)
	testManager.podStatusProvider = mockStatus
	testManager.activePods = podsStub.getActivePods

	// pod1 has no allocation yet ...
	resp = testManager.podResources.containerResource(string(pod1.UID), pod1.Spec.Containers[0].Name, res1.resourceName)
	as.Nil(resp)

	testManager.reconcileState()

	// ... and reconcileState allocated for it.
	resp = testManager.podResources.containerResource(string(pod1.UID), pod1.Spec.Containers[0].Name, res1.resourceName)
	as.NotNil(resp)
	as.Equal(float64(res1.resourceQuantity.Value()), resp.AllocatedQuantity)
}

// resourceStubAlloc returns a GetResourcesAllocation stub that always reports a
// fixed allocation (quantity 1 of "domain1.com/resource1", with one env and one
// annotation) for container "Cont0" of pod "Pod1".
func resourceStubAlloc() func(ctx context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) {
	return func(ctx context.Context, request *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) {
		resp := new(pluginapi.GetResourcesAllocationResponse)
		resp.PodResources = make(map[string]*pluginapi.ContainerResources)
		resp.PodResources[string(types.UID("Pod1"))] = new(pluginapi.ContainerResources)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources = make(map[string]*pluginapi.ResourceAllocation)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"] = new(pluginapi.ResourceAllocation)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation = make(map[string]*pluginapi.ResourceAllocationInfo)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"] = new(pluginapi.ResourceAllocationInfo)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].IsNodeResource = true
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].IsScalarResource = true
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].AllocatedQuantity = 1
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].Envs = make(map[string]string)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].Envs["kk1"] = "vv1"
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].Annotations = make(map[string]string)
		resp.PodResources[string(types.UID("Pod1"))].ContainerResources["Cont0"].ResourceAllocation["domain1.com/resource1"].Annotations["A1"] = "AA1"
		return resp, nil
	}
}

// TestDeRegisterPlugin verifies that DeRegisterPlugin stops the endpoint for a
// resource but keeps its (stopped) entry in the endpoints map.
func TestDeRegisterPlugin(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}

	testResources := []TestResource{
		res1,
	}

	podsStub := activePodsStub{
		activePods: []*v1.Pod{},
	}
	tmpDir, err := ioutil.TempDir("/tmp", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)

	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
	as.Nil(err)

	// endpoint is registered and running before deregistration
	_, exists := testManager.endpoints[res1.resourceName]
	as.True(exists)

	as.NotNil(testManager.endpoints[res1.resourceName].e)
	as.False(testManager.endpoints[res1.resourceName].e.isStopped())

	testManager.DeRegisterPlugin(res1.resourceName)

	// the map entry survives deregistration ...
	_, exists = testManager.endpoints[res1.resourceName]
	as.True(exists)

	// ensures DeRegisterPlugin worked: ... but the endpoint itself is stopped
	as.NotNil(testManager.endpoints[res1.resourceName].e)
	as.True(testManager.endpoints[res1.resourceName].e.isStopped())
}

// TestUtils exercises GetContainerTypeAndIndex: init containers are typed INIT,
// the first app container MAIN, subsequent app containers SIDECAR, each indexed
// by position within its own list.
func TestUtils(t *testing.T) {
	as := require.New(t)

	//GetContainerTypeAndIndex
	pod := &v1.Pod{}
	pod.UID = types.UID("Pod1")
	cont0 := v1.Container{}
	cont0.Name = "InitContName0"
	pod.Spec.InitContainers = append(pod.Spec.InitContainers, cont0)
	contType, contIndex, err := GetContainerTypeAndIndex(pod, &cont0)
	as.Nil(err)
	as.Equal(pluginapi.ContainerType_INIT, contType)
	as.Equal(uint64(0x0), contIndex)
	cont1 := v1.Container{}
	cont1.Name = "InitContName1"
	pod.Spec.InitContainers = append(pod.Spec.InitContainers, cont1)
	contType, contIndex, err = GetContainerTypeAndIndex(pod, &cont1)
	as.Nil(err)
	as.Equal(pluginapi.ContainerType_INIT, contType)
	as.Equal(uint64(0x1), contIndex)

	cont2 := v1.Container{}
	cont2.Name = "AppCont0"
	pod.Spec.Containers = append(pod.Spec.Containers, cont2)
	contType, contIndex, err = GetContainerTypeAndIndex(pod, &cont2)
	as.Nil(err)
	as.Equal(pluginapi.ContainerType_MAIN, contType)
	as.Equal(uint64(0x0), contIndex)

	cont3 := v1.Container{}
	cont3.Name = "AppCont1"
	pod.Spec.Containers = append(pod.Spec.Containers, cont3)
	contType, contIndex, err = GetContainerTypeAndIndex(pod, &cont3)
	as.Nil(err)
	as.Equal(pluginapi.ContainerType_SIDECAR, contType)
	as.Equal(uint64(0x1), contIndex)
}

// TestUpdateAllocatedResource verifies that UpdateAllocatedResources drops the
// cached allocations of pods that are no longer active while keeping those of
// pods that remain active.
func TestUpdateAllocatedResource(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}
	res2 := TestResource{
		resourceName:     "domain2.com/resource2",
		resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
	}
	testResources := []TestResource{
		res1,
		res2,
	}

	pod1 := makePod("Pod1", v1.ResourceList{
		v1.ResourceName(res1.resourceName): res1.resourceQuantity})
	pod2 := makePod("Pod2", v1.ResourceList{
		v1.ResourceName(res2.resourceName): res2.resourceQuantity})

	testPods := []*v1.Pod{
		pod1,
		pod2,
	}

	podsStub := activePodsStub{
		activePods: testPods,
	}
	tmpDir, err := ioutil.TempDir("", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)
	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
	as.Nil(err)

	// allocate for both pods and confirm both allocations are cached
	err = testManager.Allocate(pod1, &pod1.Spec.Containers[0])
	as.Nil(err)
	resourceAllocation1 := testManager.podResources.containerResource(string(pod1.UID), pod1.Spec.Containers[0].Name, res1.resourceName)
	as.NotNil(resourceAllocation1)
	as.Equal(float64(res1.resourceQuantity.Value()), resourceAllocation1.AllocatedQuantity)

	err = testManager.Allocate(pod2, &pod2.Spec.Containers[0])
	as.Nil(err)
	resourceAllocation2 := testManager.podResources.containerResource(string(pod2.UID), pod2.Spec.Containers[0].Name, res2.resourceName)
	as.NotNil(resourceAllocation2)
	as.Equal(float64(res2.resourceQuantity.Value()), resourceAllocation2.AllocatedQuantity)

	// shrink the active set to pod1 only, then prune
	podsStub1 := activePodsStub{
		activePods: []*v1.Pod{pod1},
	}
	testManager.activePods = podsStub1.getActivePods
	testManager.UpdateAllocatedResources()

	// ensures that resourceAllocation1 still exists
	resourceAllocation1 = testManager.podResources.containerResource(string(pod1.UID), pod1.Spec.Containers[0].Name, res1.resourceName)
	as.NotNil(resourceAllocation1)
	as.Equal(float64(res1.resourceQuantity.Value()), resourceAllocation1.AllocatedQuantity)

	// ensures that resourceAllocation2 was removed
	resourceAllocation2 = testManager.podResources.containerResource(string(pod2.UID), pod2.Spec.Containers[0].Name, res2.resourceName)
	as.Nil(resourceAllocation2)
}
// TestCheckPoint verifies checkpoint-file lifecycle: the checkpoint is written
// on allocation (both before and after a manager restart) and is re-created by
// reconcileState if it has been removed, preserving the recorded allocation.
func TestCheckPoint(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}
	res2 := TestResource{
		resourceName:     "domain2.com/resource2",
		resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
	}

	testResources := make([]TestResource, 0, 2)
	testResources = append(testResources, res1)
	testResources = append(testResources, res2)

	testPods := []*v1.Pod{
		makePod("Pod0", v1.ResourceList{
			v1.ResourceName(res1.resourceName): res1.resourceQuantity}),
		makePod("Pod1", v1.ResourceList{
			v1.ResourceName(res2.resourceName): res2.resourceQuantity}),
	}

	podsStub := activePodsStub{
		activePods: testPods,
	}
	tmpDir, err := ioutil.TempDir("/tmp", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)

	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
	as.Nil(err)

	path := filepath.Join(testManager.socketdir, kubeletQoSResourceManagerCheckpoint)
	_, err = os.Create(path)
	as.Nil(err)

	// Before qrm start
	err = os.Remove(path)
	as.Nil(err)

	mockStatus := getMockStatusWithPods(t, testPods)
	err = testManager.Start(podsStub.getActivePods, &sourcesReadyStub{}, mockStatus, &apitest.FakeRuntimeService{})
	as.Nil(err)

	err = testManager.Allocate(testPods[0], &testPods[0].Spec.Containers[0])
	as.Nil(err)
	_, err = os.Stat(path)
	// ensures that checkpoint exists after allocation
	as.Nil(err)

	//After qrm start
	err = testManager.Stop()
	as.Nil(err)

	mockStatus = getMockStatusWithPods(t, testPods)
	err = testManager.Start(podsStub.getActivePods, &sourcesReadyStub{}, mockStatus, &apitest.FakeRuntimeService{})
	as.Nil(err)

	// re-register stub endpoints; resource2's stub returns AllocationResult "0"
	testManager.registerEndpoint("domain1.com/resource1", new(pluginapi.ResourcePluginOptions), &MockEndpoint{allocateFunc: allocateStubFunc()})
	testManager.registerEndpoint("domain2.com/resource2",
		&pluginapi.ResourcePluginOptions{
			NeedReconcile: true,
		}, &MockEndpoint{
			allocateFunc: func(req *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) {
				resp := new(pluginapi.ResourceAllocationResponse)
				resp.AllocationResult = new(pluginapi.ResourceAllocation)
				resp.AllocationResult.ResourceAllocation = make(map[string]*pluginapi.ResourceAllocationInfo)
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"] = new(pluginapi.ResourceAllocationInfo)
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"].Envs = make(map[string]string)
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"].Envs["key2"] = "val2"
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"].IsScalarResource = true
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"].IsNodeResource = true
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"].AllocatedQuantity = 1
				resp.AllocationResult.ResourceAllocation["domain2.com/resource2"].AllocationResult = "0"
				return resp, nil
			},
		})

	err = os.Remove(path)
	as.Nil(err)

	err = testManager.Allocate(testPods[1], &testPods[1].Spec.Containers[0])
	as.Nil(err)

	// allocation re-creates the checkpoint file
	_, err = os.Stat(path)
	as.Nil(err)

	err = os.Remove(path)
	as.Nil(err)

	resp := testManager.podResources.containerAllResources(string(testPods[1].UID), testPods[1].Spec.Containers[0].Name)
	as.NotNil(resp)

	// ensures checkpoint matches with result from allocateFunc after remove checkpoint file
	as.Equal(resp[res2.resourceName].AllocationResult, "0")

	testManager.reconcileState()

	// ensures that checkpoint resumes after reconcile
	_, err = os.Stat(path)
	as.Nil(err)

	resp = testManager.podResources.containerAllResources(string(testPods[1].UID), testPods[1].Spec.Containers[0].Name)
	as.NotNil(resp)

	// ensures checkpoint matches with result from allocateFunc after checkpoint file resumes
	as.Equal(resp[res2.resourceName].AllocationResult, "0")
}

// TestGetResourceRunContainerOptions verifies that GetResourceRunContainerOptions
// surfaces the cached allocation results as OCI run options (cpuset cpus/mems),
// triggers the prestart hook (observed via pod annotations), and carries the
// allocation's envs and annotations through unchanged.
func TestGetResourceRunContainerOptions(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}

	res2 := TestResource{
		resourceName:     "domain2.com/resource2",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}

	pod1 := makePod("Pod1", v1.ResourceList{
		v1.ResourceName(res1.resourceName): res1.resourceQuantity})

	pod2 := makePod("Pod2", v1.ResourceList{
		v1.ResourceName(res2.resourceName): res2.resourceQuantity})

	podsStub := activePodsStub{
		activePods: []*v1.Pod{pod1, pod2},
	}

	tmpDir, err := ioutil.TempDir("", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)

	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, []TestResource{res1, res2})
	as.Nil(err)

	err = testManager.Allocate(pod1, &pod1.Spec.Containers[0])
	as.Nil(err)

	err = testManager.Allocate(pod2, &pod2.Spec.Containers[0])
	as.Nil(err)

	runContainerOpts1, err := testManager.GetResourceRunContainerOptions(pod1, &pod1.Spec.Containers[0])
	as.Nil(err)
	as.NotNil(runContainerOpts1.Resources)

	as.Equal(pod1.Annotations[PreStartAnnotationKey], PreStartAnnotationValue)

	runContainerOpts2, err := testManager.GetResourceRunContainerOptions(pod2, &pod2.Spec.Containers[0])
	as.Nil(err)
	as.NotNil(runContainerOpts2.Resources)

	// ensures prestart has been called
	as.Equal(pod2.Annotations[PreStartAnnotationKey], PreStartAnnotationValue)

	// ensures AllocationResult and OCI result in chk and runContainerOpt are equal
	resp1 := testManager.podResources.containerAllResources(string(pod1.UID), pod1.Spec.Containers[0].Name)
	as.NotNil(resp1)
	as.NotNil(resp1[res1.resourceName])

	resp2 := testManager.podResources.containerAllResources(string(pod2.UID), pod2.Spec.Containers[0].Name)
	as.NotNil(resp2)
	as.NotNil(resp2[res2.resourceName])

	// resource1 maps to CpusetCpus, resource2 to CpusetMems (see registerEndpointByRes)
	as.Equal(resp1[res1.resourceName].AllocationResult, runContainerOpts1.Resources.GetCpusetCpus())
	as.Equal(resp2[res2.resourceName].AllocationResult, runContainerOpts2.Resources.GetCpusetMems())

	// ensures annotations and envs in chk and runContainerOpt are equal
	as.Condition(func() bool {
		return annotationsEqual(resp1[res1.resourceName].Annotations, runContainerOpts1.Annotations)
	})
	as.Condition(func() bool {
		return annotationsEqual(resp2[res2.resourceName].Annotations, runContainerOpts2.Annotations)
	})
	as.Condition(func() bool {
		return envsEqual(resp1[res1.resourceName].Envs, runContainerOpts1.Envs)
	})
	as.Condition(func() bool {
		return envsEqual(resp2[res2.resourceName].Envs, runContainerOpts2.Envs)
	})
}

// envsEqual reports whether the env map and the EnvVar slice contain exactly
// the same name/value pairs.
func envsEqual(env1 map[string]string, env2 []container.EnvVar) bool {
	if len(env1) != len(env2) {
		return false
	}

	for _, env := range env2 {
		if val, found := env1[env.Name]; !found || val != env.Value {
			return false
		}
	}

	return true
}

// annotationsEqual reports whether the annotation map and the Annotation slice
// contain exactly the same name/value pairs.
func annotationsEqual(anno1 map[string]string, anno2 []container.Annotation) bool {
	if len(anno1) != len(anno2) {
		return false
	}

	for _, anno := range anno2 {
		if val, found := anno1[anno.Name]; !found || val != anno.Value {
			return false
		}
	}

	return true
}

// flag selects which branch topologyStubAllocatable returns; tests mutate it
// before each call. NOTE(review): package-level mutable state makes these
// tests order-dependent and unsafe to run with t.Parallel().
var flag int = 0

// topologyStubAllocatable returns a GetTopologyAwareAllocatableResources stub
// whose response is selected by the package-level flag:
//	1 — resource1 only, aggregated allocatable/capacity 3
//	2 — resource1 (3) plus resource2 (4)
//	3 — resource1 with per-NUMA-node quantity lists (2+2)
//	4 — an error response
func topologyStubAllocatable() func(ctx context.Context, request *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) {
	return func(ctx context.Context, request *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) {
		resp := new(pluginapi.GetTopologyAwareAllocatableResourcesResponse)
		resp.AllocatableResources = make(map[string]*pluginapi.AllocatableTopologyAwareResource)
		if flag == 1 {
			resp.AllocatableResources["domain1.com/resource1"] = new(pluginapi.AllocatableTopologyAwareResource)
			resp.AllocatableResources["domain1.com/resource1"].IsNodeResource = true
			resp.AllocatableResources["domain1.com/resource1"].IsScalarResource = true
			resp.AllocatableResources["domain1.com/resource1"].AggregatedAllocatableQuantity = 3
			resp.AllocatableResources["domain1.com/resource1"].AggregatedCapacityQuantity = 3
		}
		if flag == 2 {
			resp.AllocatableResources["domain1.com/resource1"] = new(pluginapi.AllocatableTopologyAwareResource)
			resp.AllocatableResources["domain1.com/resource1"].IsNodeResource = true
			resp.AllocatableResources["domain1.com/resource1"].IsScalarResource = true
			resp.AllocatableResources["domain1.com/resource1"].AggregatedCapacityQuantity = 3
			resp.AllocatableResources["domain1.com/resource1"].AggregatedAllocatableQuantity = 3
			resp.AllocatableResources["domain2.com/resource2"] = new(pluginapi.AllocatableTopologyAwareResource)
			resp.AllocatableResources["domain2.com/resource2"].IsNodeResource = true
			resp.AllocatableResources["domain2.com/resource2"].IsScalarResource = true
			resp.AllocatableResources["domain2.com/resource2"].AggregatedAllocatableQuantity = 4
			resp.AllocatableResources["domain2.com/resource2"].AggregatedCapacityQuantity = 4
		}
		if flag == 3 {
			resp.AllocatableResources["domain1.com/resource1"] = new(pluginapi.AllocatableTopologyAwareResource)
			resp.AllocatableResources["domain1.com/resource1"].IsNodeResource = false
			resp.AllocatableResources["domain1.com/resource1"].IsScalarResource = true
			resp.AllocatableResources["domain1.com/resource1"].AggregatedAllocatableQuantity = 4
			resp.AllocatableResources["domain1.com/resource1"].AggregatedCapacityQuantity = 4
			resp.AllocatableResources["domain1.com/resource1"].TopologyAwareAllocatableQuantityList = []*pluginapi.TopologyAwareQuantity{
				{ResourceValue: 2, Node: 0},
				{ResourceValue: 2, Node: 1},
			}
			resp.AllocatableResources["domain1.com/resource1"].TopologyAwareCapacityQuantityList = []*pluginapi.TopologyAwareQuantity{
				{ResourceValue: 2, Node: 0},
				{ResourceValue: 2, Node: 1},
			}
		}
		if flag == 4 {
			return resp, fmt.Errorf("GetTopologyAwareAllocatableResourcesResponse failed")
		}
		//fmt.Printf("%f\n", resp.AllocatableResources.TopologyAwareResources["domain1.com/resource1"].AggregatedQuantity)
		return resp, nil
	}

}

// TestGetCapacity verifies GetCapacity: with a stopped/failing endpoint (SGP=1)
// managed scalar resources are reported as deleted (while non-scalar "cpu" is
// ignored), and with healthy endpoints capacity/allocatable reflect the stub's
// aggregated quantities.
func TestGetCapacity(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}
	res2 := TestResource{
		resourceName:     "domain2.com/resource2",
		resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
	}

	testResources := []TestResource{
		res1,
		res2,
	}

	podsStub := activePodsStub{
		activePods: []*v1.Pod{},
	}
	tmpDir, err := ioutil.TempDir("", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)
	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
	as.Nil(err)

	//add 3 resource1
	//Stops resource1 endpoint.
	// NOTE(review): SGP is package-level state, like flag — presumably it makes
	// GetCapacity treat endpoints as stopped; confirm against the manager code.
	flag = 1
	SGP = 1
	testManager.endpoints[res1.resourceName] = endpointInfo{
		e: &MockEndpoint{topologyAllocatable: topologyStubAllocatable()},
	}
	testManager.endpoints["cpu"] = endpointInfo{
		e: &MockEndpoint{},
	}
	_, allocatable, deletedResourcesName := testManager.GetCapacity()
	as.Equal(v1.ResourceList{}, allocatable)
	ds := sets.NewString(deletedResourcesName...)
	as.True(ds.Has(res1.resourceName))
	as.True(ds.Has(res2.resourceName))
	// not k8s scalar resource, so ensures ignoring
	as.False(ds.Has("cpu"))

	// healthy endpoint again: capacity/allocatable for resource1 come from the stub (3)
	SGP = 0
	testManager.endpoints[res1.resourceName] = endpointInfo{
		e: &MockEndpoint{topologyAllocatable: topologyStubAllocatable()},
	}
	capacity, allocatable, _ := testManager.GetCapacity()
	ExpectC, _ := resource.ParseQuantity(fmt.Sprintf("%.3f", float64(3)))
	ExpectA, _ := resource.ParseQuantity(fmt.Sprintf("%.3f", float64(3)))
	as.Equal(ExpectC, capacity["domain1.com/resource1"])
	as.Equal(ExpectA, allocatable["domain1.com/resource1"])

	//add 4 resource2
	flag = 2
	testManager.endpoints[res2.resourceName] = endpointInfo{
		e: &MockEndpoint{topologyAllocatable: topologyStubAllocatable()},
	}
	capacity, allocatable, _ = testManager.GetCapacity()
	ExpectC, _ = resource.ParseQuantity(fmt.Sprintf("%.3f", float64(4)))
	ExpectA, _ = resource.ParseQuantity(fmt.Sprintf("%.3f", float64(4)))
	as.Equal(ExpectC, capacity["domain2.com/resource2"])
	as.Equal(ExpectA, allocatable["domain2.com/resource2"])
}

// TestGetTopologyAwareAllocatableResources verifies that the manager passes the
// plugin's topology-aware allocatable response through unchanged (flag=3) and
// propagates plugin errors (flag=4).
func TestGetTopologyAwareAllocatableResources(t *testing.T) {
	as := require.New(t)

	podsStub := activePodsStub{
		activePods: []*v1.Pod{},
	}
	tmpDir, err := ioutil.TempDir("/tmp", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)

	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, nil)
	as.Nil(err)

	testManager.registerEndpoint("domain1.com/resource1", &pluginapi.ResourcePluginOptions{
		PreStartRequired:      true,
		WithTopologyAlignment: true,
		NeedReconcile:         true,
	}, &MockEndpoint{topologyAllocatable: topologyStubAllocatable()})

	flag = 3
	resp, err := testManager.GetTopologyAwareAllocatableResources()
	as.Nil(err)
	as.NotNil(resp.AllocatableResources)
	as.NotNil(resp.AllocatableResources["domain1.com/resource1"])
	as.Equal(resp.AllocatableResources["domain1.com/resource1"], &pluginapi.AllocatableTopologyAwareResource{
		IsNodeResource:                false,
		IsScalarResource:              true,
		AggregatedCapacityQuantity:    4,
		AggregatedAllocatableQuantity: 4,
		TopologyAwareCapacityQuantityList: []*pluginapi.TopologyAwareQuantity{
			{ResourceValue: 2, Node: 0},
			{ResourceValue: 2, Node: 1},
		},
		TopologyAwareAllocatableQuantityList: []*pluginapi.TopologyAwareQuantity{
			{ResourceValue: 2, Node: 0},
			{ResourceValue: 2, Node: 1},
		},
	})

	// plugin failure is surfaced as an error
	flag = 4
	resp, err = testManager.GetTopologyAwareAllocatableResources()
	as.NotNil(err)
}

// TestGetTopologyAwareResources verifies that per-pod/per-container topology
// aware allocation responses from the matching endpoint are returned verbatim
// (endpoints clone their canned response via proto.Clone).
func TestGetTopologyAwareResources(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}

	res2 := TestResource{
		resourceName:     "domain2.com/resource2",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}

	pod1 := makePod("Pod1", v1.ResourceList{
		v1.ResourceName(res1.resourceName): res1.resourceQuantity})

	pod2 := makePod("Pod2", v1.ResourceList{
		v1.ResourceName(res2.resourceName): res2.resourceQuantity})

	podsStub := activePodsStub{
		activePods: []*v1.Pod{pod1, pod2},
	}

	tmpDir, err := ioutil.TempDir("", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)

	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, []TestResource{res1, res2})
	as.Nil(err)

	err = testManager.Allocate(pod1, &pod1.Spec.Containers[0])
	as.Nil(err)
	res1Allocation := testManager.podResources.containerResource(string(pod1.UID), pod1.Spec.Containers[0].Name, res1.resourceName)
	as.NotNil(res1Allocation)
	as.Equal(float64(res1.resourceQuantity.Value()), res1Allocation.AllocatedQuantity)

	err = testManager.Allocate(pod2, &pod2.Spec.Containers[0])
	as.Nil(err)
	res2Allocation := testManager.podResources.containerResource(string(pod2.UID), pod2.Spec.Containers[0].Name, res2.resourceName)
	as.NotNil(res2Allocation)
	as.Equal(float64(res2.resourceQuantity.Value()), res2Allocation.AllocatedQuantity)

	pod1Resp := &pluginapi.GetTopologyAwareResourcesResponse{
		PodUid:       string(pod1.UID),
		PodName:      pod1.Name,
		PodNamespace: pod1.Namespace,
		ContainerTopologyAwareResources: &pluginapi.ContainerTopologyAwareResources{
			ContainerName: pod1.Spec.Containers[0].Name,
			AllocatedResources: map[string]*pluginapi.TopologyAwareResource{
				res1.resourceName: {
					IsNodeResource:     res1Allocation.IsNodeResource,
					IsScalarResource:   res1Allocation.IsScalarResource,
					AggregatedQuantity: res1Allocation.AllocatedQuantity,
					TopologyAwareQuantityList: []*pluginapi.TopologyAwareQuantity{
						{ResourceValue: res1Allocation.AllocatedQuantity, Node: 0},
					},
				},
			},
		},
	}

	pod2Resp := &pluginapi.GetTopologyAwareResourcesResponse{
		PodUid:       string(pod2.UID),
		PodName:      pod2.Name,
		PodNamespace: pod2.Namespace,
		ContainerTopologyAwareResources: &pluginapi.ContainerTopologyAwareResources{
			ContainerName: pod2.Spec.Containers[0].Name,
			AllocatedResources: map[string]*pluginapi.TopologyAwareResource{
				res2.resourceName: {
					IsNodeResource:     res2Allocation.IsNodeResource,
					IsScalarResource:   res2Allocation.IsScalarResource,
					AggregatedQuantity: res2Allocation.AllocatedQuantity,
					TopologyAwareQuantityList: []*pluginapi.TopologyAwareQuantity{
						{ResourceValue: res2Allocation.AllocatedQuantity, Node: 1},
					},
				},
			},
		},
	}

	testManager.registerEndpoint(res1.resourceName, &pluginapi.ResourcePluginOptions{
		PreStartRequired:      true,
		WithTopologyAlignment: true,
		NeedReconcile:         true,
	}, &MockEndpoint{topologyAllocated: func(ctx context.Context, request *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) {
		if request == nil {
			return nil, fmt.Errorf("getTopologyAwareResources got nil request")
		}

		if request.PodUid == string(pod1.UID) && request.ContainerName == pod1.Spec.Containers[0].Name {
			return proto.Clone(pod1Resp).(*pluginapi.GetTopologyAwareResourcesResponse), nil
		}

		return nil, nil
	}})

	testManager.registerEndpoint(res2.resourceName, &pluginapi.ResourcePluginOptions{
		PreStartRequired:      true,
		WithTopologyAlignment: true,
		NeedReconcile:         true,
	}, &MockEndpoint{topologyAllocated: func(ctx context.Context, request *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) {
		if request == nil {
			return nil, fmt.Errorf("getTopologyAwareResources got nil request")
		}

		if request.PodUid == string(pod2.UID) && request.ContainerName == pod2.Spec.Containers[0].Name {
			return proto.Clone(pod2Resp).(*pluginapi.GetTopologyAwareResourcesResponse), nil
		}

		return nil, nil
	}})

	resp1, err := testManager.GetTopologyAwareResources(pod1, &pod1.Spec.Containers[0])
	as.Nil(err)
	as.Equal(resp1, pod1Resp)

	resp2, err := testManager.GetTopologyAwareResources(pod2, &pod2.Spec.Containers[0])
	as.Nil(err)
	as.Equal(resp2, pod2Resp)
}

// TestUpdatePluginResources verifies that UpdatePluginResources publishes the
// allocated scalar-resource quantity into the scheduler NodeInfo's allocatable
// set during pod admission.
func TestUpdatePluginResources(t *testing.T) {
	as := require.New(t)

	res1 := TestResource{
		resourceName:     "domain1.com/resource1",
		resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
	}

	testResources := []TestResource{
		res1,
	}

	pod1 := makePod("Pod1", v1.ResourceList{
		v1.ResourceName(res1.resourceName): res1.resourceQuantity})

	testPods := []*v1.Pod{
		pod1,
	}

	podsStub := activePodsStub{
		activePods: testPods,
	}

	tmpDir, err := ioutil.TempDir("/tmp", "checkpoint")
	as.Nil(err)
	defer os.RemoveAll(tmpDir)

	testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
	as.Nil(err)

	err = testManager.Allocate(pod1, &pod1.Spec.Containers[0])
	as.Nil(err)

	cachedNode := &v1.Node{}
	nodeInfo := &schedulerframework.NodeInfo{}
	nodeInfo.SetNode(cachedNode)

	testManager.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod1})
	allocatableResource := nodeInfo.Allocatable
	as.NotNil(allocatableResource)
	as.Equal(res1.resourceQuantity.Value(), allocatableResource.ScalarResources[v1.ResourceName(res1.resourceName)])
}

// constructResourceAlloc builds a ResourceAllocationInfo with the given OCI
// property name and node/scalar flags; test helper only.
func constructResourceAlloc(OciPropertyName string, IsNodeResource bool, IsScalarResource bool) *pluginapi.ResourceAllocationInfo {
	resp := &pluginapi.ResourceAllocationInfo{}
	resp.OciPropertyName = OciPropertyName
	resp.IsNodeResource = IsNodeResource
	resp.IsScalarResource = IsScalarResource
	return resp
}

/*
func TestResetExtendedResource(t *testing.T) {
	as := assert.New(t)
	tmpDir, err := ioutil.TempDir("", "checkpoint")
	as.Nil(err)
	ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
	as.Nil(err)
	testManager := &ManagerImpl{
		endpoints:                        make(map[string]endpointInfo),
		allocatedScalarResourcesQuantity: make(map[string]float64),
		podResources:                     newPodResourcesChk(),
		checkpointManager:                ckm,
	}
	extendedResourceName := "domain.com/resource1"
	testManager.podResources.insert("pod", "con", extendedResourceName, constructResourceAlloc("Name", true, true))

	//checkpoint is present, indicating node hasn't been recreated
	err = testManager.writeCheckpoint()
	as.Nil(err)
	as.False(testManager.ShouldResetExtendedResourceCapacity())

	//checkpoint is absent, representing node recreation
	ckpts, err := ckm.ListCheckpoints()
	as.Nil(err)
	for _, ckpt := range ckpts {
		err := ckm.RemoveCheckpoint(ckpt)
		as.Nil(err)
	}
	utilfeature.DefaultMutableFeatureGate.Set("QoSResourceManager=true")
	as.True(testManager.ShouldResetExtendedResourceCapacity())
}
*/

// setupManager creates and starts a ManagerImpl listening on socketName, with
// an empty active-pod set and a fake runtime service.
func setupManager(t *testing.T, socketName string) *ManagerImpl {
	topologyStore := topologymanager.NewFakeManager()
	m, err := newManagerImpl(socketName, topologyStore, time.Second, nil)
	require.NoError(t, err)

	activePods := func() []*v1.Pod {
		return []*v1.Pod{}
	}

	mockStatus := new(statustest.MockPodStatusProvider)

	err = m.Start(activePods, &sourcesReadyStub{}, mockStatus, &apitest.FakeRuntimeService{})
	require.NoError(t, err)

	return m
}

// setupPlugin creates and starts a resource-plugin stub serving testResourceName
// on pluginSocketName.
func setupPlugin(t *testing.T, pluginSocketName string) *Stub {
	p := NewResourcePluginStub(pluginSocketName, testResourceName, false)
	err := p.Start()
	require.NoError(t, err)
	return p
}

// setupPluginManager wires the manager's watcher handler into a plugin manager
// watching the stub socket's directory, and starts it; returns the stop channel
// the caller must close.
func setupPluginManager(t *testing.T, pluginSocketName string, m *ManagerImpl) (pluginmanager.PluginManager, chan struct{}) {
	pluginManager := pluginmanager.NewPluginManager(
		filepath.Dir(pluginSocketName), /* sockDir */
		&record.FakeRecorder{},
	)

	pluginManager.AddHandler(watcherapi.ResourcePlugin, m.GetWatcherHandler())

	stopCh := make(chan struct{})
	runPluginManager(pluginManager, stopCh)
	return pluginManager, stopCh
}

// runPluginManager runs the plugin manager in a goroutine until stopCh closes;
// sources are always reported ready.
func runPluginManager(pluginManager pluginmanager.PluginManager, stopCh chan struct{}) {
	sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
	go pluginManager.Run(sourcesReady, stopCh)
}

// setup starts a manager and a plugin stub (no plugin-manager probing).
func setup(t *testing.T, socketName string, pluginSocketName string) (*ManagerImpl, *Stub) {
	m := setupManager(t, socketName)
	p := setupPlugin(t, pluginSocketName)
	return m, p
}

// setupInProbeMode starts a manager, a plugin manager probing the stub's socket
// directory, and then the plugin stub itself.
func setupInProbeMode(t *testing.T, socketName string, pluginSocketName string) (*ManagerImpl, *Stub, pluginmanager.PluginManager, chan struct{}) {
	m := setupManager(t, socketName)
	pm, stopCh := setupPluginManager(t, pluginSocketName, m)
	p := setupPlugin(t, pluginSocketName)
	return m, p, pm, stopCh
}

// cleanup stops the plugin stub and manager and, when probing was enabled,
// closes the plugin manager's stop channel.
func cleanup(t *testing.T, m *ManagerImpl, p *Stub, stopCh chan struct{}) {
	p.Stop()
	m.Stop()

	if stopCh != nil {
		close(stopCh)
	}
}

// allocateStubFunc returns an allocate stub that always grants quantity 2 of
// "domain1.com/resource1" with a single env var.
func allocateStubFunc() func(*pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) {
	return func(req *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) {
		resp := new(pluginapi.ResourceAllocationResponse)
		resp.ResourceName = "domain1.com/resource1"
		//resp.ContainerName = "Cont1"
		resp.AllocationResult = new(pluginapi.ResourceAllocation)
		resp.AllocationResult.ResourceAllocation = make(map[string]*pluginapi.ResourceAllocationInfo)
		resp.AllocationResult.ResourceAllocation["domain1.com/resource1"] = new(pluginapi.ResourceAllocationInfo)
		resp.AllocationResult.ResourceAllocation["domain1.com/resource1"].Envs = make(map[string]string)
		resp.AllocationResult.ResourceAllocation["domain1.com/resource1"].Envs["key1"] = "val1"
		resp.AllocationResult.ResourceAllocation["domain1.com/resource1"].IsScalarResource = true
		resp.AllocationResult.ResourceAllocation["domain1.com/resource1"].IsNodeResource = true
		resp.AllocationResult.ResourceAllocation["domain1.com/resource1"].AllocatedQuantity = 2
		return resp, nil
	}
}

// getMockStatusWithPods builds a gomock PodStatusProvider that returns a
// running status (start time now, one status per container with a fresh
// container ID) for every given pod, any number of times.
func getMockStatusWithPods(t *testing.T, pods []*v1.Pod) status.PodStatusProvider {
	mockCtrl := gomock.NewController(t)
	mockStatus := statustest.NewMockPodStatusProvider(mockCtrl)

	for _, pod := range pods {
		containerStatuses := make([]v1.ContainerStatus, 0, len(pod.Spec.Containers))

		for _, container := range pod.Spec.Containers {
			ContStat := v1.ContainerStatus{
				Name:        container.Name,
				ContainerID: fmt.Sprintf("ContId://%s", uuid.NewUUID()),
			}
			containerStatuses = append(containerStatuses, ContStat)
		}

		p0Time := metav1.Now()
		mockStatus.EXPECT().GetPodStatus(pod.UID).Return(v1.PodStatus{StartTime: &p0Time, ContainerStatuses: containerStatuses}, true).AnyTimes()
	}

	return mockStatus
}

// registerEndpointByRes registers a MockEndpoint per test resource:
// resource1/resource2 get an allocate stub that echoes the requested quantity
// (with OCI property CpusetCpus/CpusetMems respectively), resource3 gets one
// that always fails; other names are ignored.
func registerEndpointByRes(manager *ManagerImpl, testRes []TestResource) error {
	if manager == nil {
		return fmt.Errorf("registerEndpointByRes got nil manager")
	}

	for i, res := range testRes {
		var OciPropertyName string
		if res.resourceName == "domain1.com/resource1" {
			OciPropertyName = "CpusetCpus"
		} else if res.resourceName == "domain2.com/resource2" {
			OciPropertyName = "CpusetMems"
		}

		curResourceName := res.resourceName

		if res.resourceName == "domain1.com/resource1" || res.resourceName == "domain2.com/resource2" {
			// NOTE(review): the closure below captures the loop variable i;
			// before Go 1.22 all iterations share one i — confirm the intended
			// key%d/val%d values, or hoist `i := i` inside the loop.
			manager.registerEndpoint(curResourceName, &pluginapi.ResourcePluginOptions{
				PreStartRequired:      true,
				WithTopologyAlignment: true,
				NeedReconcile:         true,
			}, &MockEndpoint{
				allocateFunc: func(req *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) {
					if req == nil {
						return nil, fmt.Errorf("allocateFunc got nil request")
					}

					resp := new(pluginapi.ResourceAllocationResponse)
					resp.AllocationResult = new(pluginapi.ResourceAllocation)
					resp.AllocationResult.ResourceAllocation = make(map[string]*pluginapi.ResourceAllocationInfo)
					resp.AllocationResult.ResourceAllocation[curResourceName] = new(pluginapi.ResourceAllocationInfo)
					resp.AllocationResult.ResourceAllocation[curResourceName].Envs = make(map[string]string)
					resp.AllocationResult.ResourceAllocation[curResourceName].Envs[fmt.Sprintf("key%d", i)] = fmt.Sprintf("val%d", i)
					resp.AllocationResult.ResourceAllocation[curResourceName].Annotations = make(map[string]string)
					resp.AllocationResult.ResourceAllocation[curResourceName].Annotations[fmt.Sprintf("key%d", i)] = fmt.Sprintf("val%d", i)
					resp.AllocationResult.ResourceAllocation[curResourceName].IsScalarResource = true
					resp.AllocationResult.ResourceAllocation[curResourceName].IsNodeResource = true
					resp.AllocationResult.ResourceAllocation[curResourceName].AllocatedQuantity = req.ResourceRequests[curResourceName]
					resp.AllocationResult.ResourceAllocation[curResourceName].AllocationResult = "0-1"
					resp.AllocationResult.ResourceAllocation[curResourceName].OciPropertyName = OciPropertyName
					return resp, nil
				},
			})
		} else if res.resourceName == "domain3.com/resource3" {
			manager.registerEndpoint(curResourceName, &pluginapi.ResourcePluginOptions{
				PreStartRequired:      true,
				WithTopologyAlignment: true,
				NeedReconcile:         true,
			}, &MockEndpoint{
				allocateFunc: func(req *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) {
					return nil, fmt.Errorf("mock error")
				},
			})
		}
	}

	return nil
}

func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestResource) (*ManagerImpl,
error) { + ckm, err := checkpointmanager.NewCheckpointManager(tmpDir) + if err != nil { + return nil, err + } + testManager := &ManagerImpl{ + socketdir: tmpDir, + socketname: "/server.sock", + allocatedScalarResourcesQuantity: make(map[string]float64), + endpoints: make(map[string]endpointInfo), + podResources: newPodResourcesChk(), + topologyAffinityStore: topologymanager.NewFakeManager(), + activePods: activePods, + sourcesReady: &sourcesReadyStub{}, + checkpointManager: ckm, + containerRuntime: &apitest.FakeRuntimeService{}, + reconcilePeriod: 5 * time.Second, + } + + registerEndpointByRes(testManager, testRes) + + return testManager, nil +} diff --git a/pkg/kubelet/cm/qosresourcemanager/pod_resources.go b/pkg/kubelet/cm/qosresourcemanager/pod_resources.go new file mode 100644 index 0000000000000..fe8ee579fba0e --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/pod_resources.go @@ -0,0 +1,401 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package qosresourcemanager + +import ( + "fmt" + "reflect" + "strconv" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" + "k8s.io/klog/v2" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/qosresourcemanager/checkpoint" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + + "github.com/golang/protobuf/proto" +) + +type ResourceAllocation map[string]*pluginapi.ResourceAllocationInfo // Keyed by resourceName. +type ContainerResources map[string]ResourceAllocation // Keyed by containerName. +type PodResources map[string]ContainerResources // Keyed by podUID + +type podResourcesChk struct { + sync.RWMutex + resources PodResources // Keyed by podUID. +} + +var EmptyValue = reflect.Value{} + +func newPodResourcesChk() *podResourcesChk { + return &podResourcesChk{ + resources: make(PodResources), + } +} + +func (pr PodResources) DeepCopy() PodResources { + copiedPodResources := make(PodResources) + + for podUID, containerResources := range pr { + copiedPodResources[podUID] = containerResources.DeepCopy() + } + + return copiedPodResources +} + +func (cr ContainerResources) DeepCopy() ContainerResources { + copiedContainerResources := make(ContainerResources) + + for containerName, resouceAllocation := range cr { + copiedContainerResources[containerName] = resouceAllocation.DeepCopy() + } + + return copiedContainerResources +} + +func (ra ResourceAllocation) DeepCopy() ResourceAllocation { + copiedResourceAllocation := make(ResourceAllocation) + + for resourceName, allocationInfo := range ra { + copiedResourceAllocation[resourceName] = proto.Clone(allocationInfo).(*pluginapi.ResourceAllocationInfo) + } + + return copiedResourceAllocation +} + +func (pres *podResourcesChk) pods() sets.String { + pres.RLock() + defer pres.RUnlock() + + ret := sets.NewString() + for k := range pres.resources { + ret.Insert(k) + } + return ret +} + +// "resourceName" here is different 
than "resourceName" in qrm allocation, one qrm plugin may +// only represent one resource in allocation, but can also return several other resourceNames +// to store in pod resources +func (pres *podResourcesChk) insert(podUID, contName, resourceName string, allocationInfo *pluginapi.ResourceAllocationInfo) { + if allocationInfo == nil { + return + } + + pres.Lock() + defer pres.Unlock() + + if _, podExists := pres.resources[podUID]; !podExists { + pres.resources[podUID] = make(ContainerResources) + } + if _, contExists := pres.resources[podUID][contName]; !contExists { + pres.resources[podUID][contName] = make(ResourceAllocation) + } + + pres.resources[podUID][contName][resourceName] = proto.Clone(allocationInfo).(*pluginapi.ResourceAllocationInfo) +} + +func (pres *podResourcesChk) deleteResourceAllocationInfo(podUID, contName, resourceName string) { + pres.Lock() + defer pres.Unlock() + + if pres.resources[podUID] != nil && pres.resources[podUID][contName] != nil { + delete(pres.resources[podUID][contName], resourceName) + } +} + +func (pres *podResourcesChk) deletePod(podUID string) { + pres.Lock() + defer pres.Unlock() + + if pres.resources == nil { + return + } + + delete(pres.resources, podUID) +} + +func (pres *podResourcesChk) delete(pods []string) { + pres.Lock() + defer pres.Unlock() + + if pres.resources == nil { + return + } + + for _, uid := range pods { + delete(pres.resources, uid) + } +} + +func (pres *podResourcesChk) podResources(podUID string) ContainerResources { + pres.RLock() + defer pres.RUnlock() + + if _, podExists := pres.resources[podUID]; !podExists { + return nil + } + + return pres.resources[podUID] +} + +// Returns all resources information allocated to the given container. +// Returns nil if we don't have cached state for the given . 
+func (pres *podResourcesChk) containerAllResources(podUID, contName string) ResourceAllocation { + pres.RLock() + defer pres.RUnlock() + + if _, podExists := pres.resources[podUID]; !podExists { + return nil + } + if _, contExists := pres.resources[podUID][contName]; !contExists { + return nil + } + + return pres.resources[podUID][contName].DeepCopy() +} + +// Returns resource information allocated to the given container for the given resource. +// Returns nil if we don't have cached state for the given . +func (pres *podResourcesChk) containerResource(podUID, contName, resource string) *pluginapi.ResourceAllocationInfo { + pres.RLock() + defer pres.RUnlock() + + if _, podExists := pres.resources[podUID]; !podExists { + return nil + } + if _, contExists := pres.resources[podUID][contName]; !contExists { + return nil + } + resourceAllocationInfo, resourceExists := pres.resources[podUID][contName][resource] + if !resourceExists || resourceAllocationInfo == nil { + return nil + } + return proto.Clone(resourceAllocationInfo).(*pluginapi.ResourceAllocationInfo) +} + +// Returns allocated scalar resources quantity used to sanitize node allocatable when pod admitting. +// Only for scalar resources need to be updated to node status. +func (pres *podResourcesChk) scalarResourcesQuantity() map[string]float64 { + pres.RLock() + defer pres.RUnlock() + + ret := make(map[string]float64) + for _, containerResources := range pres.resources { + for _, resourcesAllocation := range containerResources { + for resourceName, allocationInfo := range resourcesAllocation { + if allocationInfo.IsNodeResource && allocationInfo.IsScalarResource { + ret[resourceName] += allocationInfo.AllocatedQuantity + } + } + } + } + return ret +} + +// Turns podResourcesChk to checkpointData. 
+func (pres *podResourcesChk) toCheckpointData() []checkpoint.PodResourcesEntry { + pres.RLock() + defer pres.RUnlock() + + var data []checkpoint.PodResourcesEntry + for podUID, containerResources := range pres.resources { + for conName, resourcesAllocation := range containerResources { + for resourceName, allocationInfo := range resourcesAllocation { + allocRespBytes, err := allocationInfo.Marshal() + if err != nil { + klog.Errorf("Can't marshal allocationInfo for %v %v %v: %v", podUID, conName, resourceName, err) + continue + } + data = append(data, checkpoint.PodResourcesEntry{ + PodUID: podUID, + ContainerName: conName, + ResourceName: resourceName, + AllocationInfo: string(allocRespBytes)}) + } + } + } + return data +} + +// Populates podResourcesChk from the passed in checkpointData. +func (pres *podResourcesChk) fromCheckpointData(data []checkpoint.PodResourcesEntry) { + for _, entry := range data { + klog.V(2).Infof("Get checkpoint entry: %s %s %s %s\n", + entry.PodUID, entry.ContainerName, entry.ResourceName, entry.AllocationInfo) + allocationInfo := &pluginapi.ResourceAllocationInfo{} + err := allocationInfo.Unmarshal([]byte(entry.AllocationInfo)) + if err != nil { + klog.Errorf("Can't unmarshal allocationInfo for %s %s %s %s: %v", + entry.PodUID, entry.ContainerName, entry.ResourceName, entry.AllocationInfo, err) + continue + } + pres.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, allocationInfo) + } +} + +// [TODO](sunjianyu) to support setting value for struct type recursively +func setReflectValue(valueObj reflect.Value, value string) error { + switch valueObj.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + valueInt, pErr := strconv.ParseInt(value, 10, 64) + + if pErr != nil { + return fmt.Errorf("parse: %s to int failed with error: %v", value, pErr) + } + + valueObj.SetInt(valueInt) + case reflect.String: + valueObj.SetString(value) + default: + return fmt.Errorf("not supported type: %v set to 
value: %s", valueObj.Kind(), value) + } + + return nil +} + +func assembleOciResourceConfig(podUID, containerName string, opts *kubecontainer.ResourceRunContainerOptions, resources map[string]*pluginapi.ResourceAllocationInfo) error { + if opts == nil { + return fmt.Errorf("assembleOciResourceConfig got nil options") + } + + ociResourceConfig := &runtimeapi.LinuxContainerResources{} + ociResourceConfigValueElem := reflect.ValueOf(ociResourceConfig).Elem() + + for resourceName, resourceAllocationInfo := range resources { + if resourceAllocationInfo == nil { + klog.Warningf("[qosresourcemanager.assembleOciResourceConfig] resource: %s with nil resourceAllocationInfo", resourceName) + continue + } + + if resourceAllocationInfo.OciPropertyName != "" { + field := ociResourceConfigValueElem.FieldByName(resourceAllocationInfo.OciPropertyName) + + if field == EmptyValue { + return fmt.Errorf("OCI resource config doesn't support oci property name: %s for resource: %s", resourceAllocationInfo.OciPropertyName, resourceName) + } + + sErr := setReflectValue(field, resourceAllocationInfo.AllocationResult) + + if sErr != nil { + return fmt.Errorf("set oci property name: %s for resource: %s to value: %s failed with error: %v", + resourceAllocationInfo.OciPropertyName, resourceName, resourceAllocationInfo.AllocationResult, sErr) + } + + klog.Infof("[qosresourcemanager.assembleOciResourceConfig] podUID: %s, containerName: %s, set oci property: %s for resource: %s to value: %s in OCI resource config ", + podUID, containerName, resourceAllocationInfo.OciPropertyName, resourceName, resourceAllocationInfo.AllocationResult) + } + } + + opts.Resources = ociResourceConfig + return nil +} + +func (pres *podResourcesChk) allAllocatedNodeResourceNames() sets.String { + pres.RLock() + defer pres.RUnlock() + + res := sets.NewString() + + for _, containerResources := range pres.resources { + for _, resourcesAllocation := range containerResources { + for resourceName, allocation := range 
resourcesAllocation { + if allocation.IsNodeResource { + res.Insert(resourceName) + } + } + } + } + + return res +} + +func (pres *podResourcesChk) allAllocatedResourceNames() sets.String { + pres.RLock() + defer pres.RUnlock() + + res := sets.NewString() + + for _, containerResources := range pres.resources { + for _, resourcesAllocation := range containerResources { + for resourceName := range resourcesAllocation { + res.Insert(resourceName) + } + } + } + + return res +} + +// Returns combined container runtime settings to consume the container's allocated resources. +func (pres *podResourcesChk) resourceRunContainerOptions(podUID, contName string) (*kubecontainer.ResourceRunContainerOptions, error) { + pres.RLock() + defer pres.RUnlock() + + containers, exists := pres.resources[podUID] + if !exists { + return nil, nil + } + resources, exists := containers[contName] + if !exists { + return nil, nil + } + opts := &kubecontainer.ResourceRunContainerOptions{} + + // Maps to detect duplicate settings. 
+ envsMap := make(map[string]string) + annotationsMap := make(map[string]string) + for _, resourceAllocationInfo := range resources { + for k, v := range resourceAllocationInfo.Envs { + if e, ok := envsMap[k]; ok { + klog.V(4).Infof("[qosresourcemanager] skip existing env %s %s for pod: %s, container: %s", k, v, podUID, contName) + if e != v { + klog.Errorf("[qosresourcemanager] environment variable %s has conflicting setting: %s and %s for for pod: %s, container: %s", + k, e, v, podUID, contName) + } + continue + } + klog.V(4).Infof("[qosresourcemanager] add env %s %s for pod: %s, container: %s", k, v, podUID, contName) + envsMap[k] = v + opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v}) + } + + // Updates for Annotations + for k, v := range resourceAllocationInfo.Annotations { + if e, ok := annotationsMap[k]; ok { + klog.V(4).Infof("[qosresourcemanager] skip existing annotation %s %s for pod: %s, container: %s", k, v, podUID, contName) + if e != v { + klog.Errorf("[qosresourcemanager] annotation %s has conflicting setting: %s and %s for pod: %s, container: %s", k, e, v, podUID, contName) + } + continue + } + klog.V(4).Infof("[qosresourcemanager] add annotation %s %s for pod: %s, container: %s", k, v, podUID, contName) + annotationsMap[k] = v + opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v}) + } + } + + err := assembleOciResourceConfig(podUID, contName, opts, resources) + + if err != nil { + return nil, fmt.Errorf("assembleOciResourceConfig failed with error: %v", err) + } + + return opts, nil +} diff --git a/pkg/kubelet/cm/qosresourcemanager/pod_resources_test.go b/pkg/kubelet/cm/qosresourcemanager/pod_resources_test.go new file mode 100644 index 0000000000000..bace3e9e645dd --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/pod_resources_test.go @@ -0,0 +1,246 @@ +package qosresourcemanager + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + 
"k8s.io/apimachinery/pkg/util/sets" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +func TestPodResources(t *testing.T) { + podResources := newPodResourcesChk() + + type resAllocation struct { + resName string + allocation *pluginapi.ResourceAllocationInfo + } + + normalAllocation := generateResourceAllocationInfo() + + normalAllocation2 := generateResourceAllocationInfo() + normalAllocation2.Annotations["mock_key_2"] = "mock_ano_2" + normalAllocation2.OciPropertyName = "CpusetMems" + normalAllocation2.AllocationResult = "1,2" + + overrideAllocation := generateResourceAllocationInfo() + overrideAllocation.Envs["mock_key"] = "mock_env_2" + overrideAllocation.Annotations["mock_key_2"] = "mock_ano_2" + overrideAllocation.AllocatedQuantity = 4 + + invalidAllocation := generateResourceAllocationInfo() + invalidAllocation.OciPropertyName = "mock" + + type testCase struct { + // inserted pod resources + description string + podUID string + conName string + allocations []resAllocation + + // testing results + resConResource ContainerResources + resScalarResource map[string]float64 + resResourceNames sets.String + resOptions *kubecontainer.ResourceRunContainerOptions + resOptionsErr error + } + testCases := []testCase{ + { + description: "insert pod resources with whole info", + podUID: "mock_pod", + conName: "mock_con", + allocations: []resAllocation{ + {resName: "mock_res", allocation: normalAllocation}, + }, + + resConResource: ContainerResources{ + "mock_con": { + "mock_res": normalAllocation, + }, + }, + resScalarResource: map[string]float64{"mock_res": 3}, + resResourceNames: sets.NewString("mock_res"), + resOptions: &kubecontainer.ResourceRunContainerOptions{ + Envs: []kubecontainer.EnvVar{ + { + Name: "mock_key", + Value: "mock_env", + }, + }, + Annotations: []kubecontainer.Annotation{ + { + Name: "mock_key", + Value: "mock_ano", + }, + }, 
+ Resources: &runtimeapi.LinuxContainerResources{ + CpusetCpus: "5-6,10", + }, + }, + resOptionsErr: nil, + }, + { + description: "insert pod resources with multiple resources", + podUID: "mock_pod", + conName: "mock_con", + allocations: []resAllocation{ + {resName: "mock_res", allocation: normalAllocation}, + {resName: "mock_res_2", allocation: normalAllocation2}, + }, + + resConResource: ContainerResources{ + "mock_con": { + "mock_res": normalAllocation, + "mock_res_2": normalAllocation2, + }, + }, + resScalarResource: map[string]float64{"mock_res": 3, "mock_res_2": 3}, + resResourceNames: sets.NewString("mock_res", "mock_res_2"), + resOptions: &kubecontainer.ResourceRunContainerOptions{ + Envs: []kubecontainer.EnvVar{ + { + Name: "mock_key", + Value: "mock_env", + }, + }, + Annotations: []kubecontainer.Annotation{ + { + Name: "mock_key", + Value: "mock_ano", + }, + { + Name: "mock_key_2", + Value: "mock_ano_2", + }, + }, + Resources: &runtimeapi.LinuxContainerResources{ + CpusetCpus: "5-6,10", + CpusetMems: "1,2", + }, + }, + resOptionsErr: nil, + }, + { + description: "override pod resources with whole info", + podUID: "mock_pod", + conName: "mock_con", + allocations: []resAllocation{ + {resName: "mock_res", allocation: normalAllocation}, + {resName: "mock_res", allocation: overrideAllocation}, + }, + + resConResource: ContainerResources{ + "mock_con": { + "mock_res": overrideAllocation, + }, + }, + resScalarResource: map[string]float64{"mock_res": 4}, + resResourceNames: sets.NewString("mock_res"), + resOptions: &kubecontainer.ResourceRunContainerOptions{ + Envs: []kubecontainer.EnvVar{ + { + Name: "mock_key", + Value: "mock_env_2", + }, + }, + Annotations: []kubecontainer.Annotation{ + { + Name: "mock_key", + Value: "mock_ano", + }, + { + Name: "mock_key_2", + Value: "mock_ano_2", + }, + }, + Resources: &runtimeapi.LinuxContainerResources{ + CpusetCpus: "5-6,10", + }, + }, + resOptionsErr: nil, + }, + { + description: "insert pod resources with invalid oci 
config", + podUID: "mock_pod", + conName: "mock_con", + allocations: []resAllocation{ + {resName: "mock_res", allocation: invalidAllocation}, + }, + + resConResource: ContainerResources{ + "mock_con": { + "mock_res": invalidAllocation, + }, + }, + resScalarResource: map[string]float64{"mock_res": 3}, + resResourceNames: sets.NewString("mock_res"), + resOptionsErr: errors.New(""), + }, + } + + convertENVToMap := func(envs []kubecontainer.EnvVar) map[string]string { + res := make(map[string]string) + for _, env := range envs { + res[env.Name] = env.Value + } + return res + } + convertAnnotationToMap := func(annotations []kubecontainer.Annotation) map[string]string { + res := make(map[string]string) + for _, ano := range annotations { + res[ano.Name] = ano.Value + } + return res + } + check := func(prefix string, tc testCase) { + resConResource := podResources.podResources(tc.podUID) + resScalarResource := podResources.scalarResourcesQuantity() + resResourceNames := podResources.allAllocatedResourceNames() + resOptions, resOptionsErr := podResources.resourceRunContainerOptions(tc.podUID, tc.conName) + + require.Equal(t, resConResource, tc.resConResource, "%v/%v: pod container resources not equal", prefix, tc.description) + require.Equal(t, resScalarResource, tc.resScalarResource, "%v/%v: scalar resources not equal", prefix, tc.description) + require.Equal(t, resResourceNames, tc.resResourceNames, "%v/%v: all resource names not equal", prefix, tc.description) + require.Equal(t, resOptions == nil, tc.resOptions == nil, "%v/%v: container options not equal", prefix, tc.description) + require.Equal(t, resOptionsErr == nil, tc.resOptionsErr == nil, "%v/%v: container options error not equal", prefix, tc.description) + if resOptions != nil && tc.resOptions != nil { + require.Equal(t, resOptions.Resources, tc.resOptions.Resources, "%v/%v: container options [resources] not equal", prefix, tc.description) + require.Equal(t, convertENVToMap(resOptions.Envs), 
convertENVToMap(tc.resOptions.Envs), "%v/%v: container options [envs] not equal", prefix, tc.description) + require.Equal(t, convertAnnotationToMap(resOptions.Annotations), convertAnnotationToMap(tc.resOptions.Annotations), "%v/%v: container options [annotations] not equal", prefix, tc.description) + } + } + + for _, tc := range testCases { + t.Logf("%v", tc.description) + for _, a := range tc.allocations { + podResources.insert(tc.podUID, tc.conName, a.resName, a.allocation) + } + + check("before reloading checkpoint", tc) + data := podResources.toCheckpointData() + + podResources.delete([]string{tc.podUID}) + podResources.fromCheckpointData(data) + check("after reloading checkpoint", tc) + + podResources.delete([]string{tc.podUID}) + } +} + +func generateResourceAllocationInfo() *pluginapi.ResourceAllocationInfo { + return &pluginapi.ResourceAllocationInfo{ + OciPropertyName: "CpusetCpus", + IsNodeResource: true, + IsScalarResource: true, + AllocatedQuantity: 3, + AllocationResult: "5-6,10", + Envs: map[string]string{"mock_key": "mock_env"}, + Annotations: map[string]string{"mock_key": "mock_ano"}, + ResourceHints: &pluginapi.ListOfTopologyHints{}, + } +} diff --git a/pkg/kubelet/cm/qosresourcemanager/resource_plugin_stub.go b/pkg/kubelet/cm/qosresourcemanager/resource_plugin_stub.go new file mode 100644 index 0000000000000..593a4d553eff5 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/resource_plugin_stub.go @@ -0,0 +1,261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package qosresourcemanager + +import ( + "context" + "log" + "net" + "os" + "path" + "sync" + "time" + + "google.golang.org/grpc" + + watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" +) + +// Stub implementation for ResourcePlugin. +type Stub struct { + socket string + resourceName string + preStartContainerFlag bool + + stop chan interface{} + wg sync.WaitGroup + + server *grpc.Server + + // allocFunc1 is used for handling allocation request + allocFunc1 stubAllocFunc1 + //handling get allocation request + allocFunc2 stubAllocFunc2 + + registrationStatus chan watcherapi.RegistrationStatus // for testing + endpoint string // for testing +} + +// stubAllocFunc1 is the function called when an allocation request is received from Kubelet +type stubAllocFunc1 func(r *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) + +//stubAllocFYnc2 is the function called when a get allocation request is received form Kubelet +type stubAllocFunc2 func(r *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) + +func defaultAllocFunc(r *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) { + var response pluginapi.ResourceAllocationResponse + + return &response, nil +} +func defaultGetAllocFunc(r *pluginapi.GetResourcesAllocationRequest) (*pluginapi.GetResourcesAllocationResponse, error) { + var response pluginapi.GetResourcesAllocationResponse + return &response, nil +} + +// NewResourcePluginStub returns an initialized ResourcePlugin Stub. 
+func NewResourcePluginStub(socket string, name string, preStartContainerFlag bool) *Stub { + return &Stub{ + socket: socket, + resourceName: name, + preStartContainerFlag: preStartContainerFlag, + + stop: make(chan interface{}), + + allocFunc1: defaultAllocFunc, + allocFunc2: defaultGetAllocFunc, + } +} + +// SetAllocFunc sets allocFunc of the resource plugin +func (m *Stub) SetAllocFunc(f stubAllocFunc1) { + m.allocFunc1 = f +} +func (m *Stub) SetGetAllocFunc(f stubAllocFunc2) { + m.allocFunc2 = f +} + +// Start starts the gRPC server of the resource plugin. Can only +// be called once. +func (m *Stub) Start() error { + err := m.cleanup() + if err != nil { + return err + } + + sock, err := net.Listen("unix", m.socket) + if err != nil { + return err + } + + m.wg.Add(1) + m.server = grpc.NewServer([]grpc.ServerOption{}...) + pluginapi.RegisterResourcePluginServer(m.server, m) + watcherapi.RegisterRegistrationServer(m.server, m) + + go func() { + defer func() { + m.wg.Done() + + if err := recover(); err != nil { + log.Fatalf("Start recover from err: %v", err) + } + }() + m.server.Serve(sock) + }() + _, conn, err := dial(m.socket) + if err != nil { + return err + } + conn.Close() + log.Printf("Starting to serve on %v", m.socket) + + return nil +} + +// Stop stops the gRPC server. Can be called without a prior Start +// and more than once. Not safe to be called concurrently by different +// goroutines! +func (m *Stub) Stop() error { + if m.server == nil { + return nil + } + m.server.Stop() + m.wg.Wait() + m.server = nil + close(m.stop) // This prevents re-starting the server. 
+ + return m.cleanup() +} + +// GetInfo is the RPC which return pluginInfo +func (m *Stub) GetInfo(ctx context.Context, req *watcherapi.InfoRequest) (*watcherapi.PluginInfo, error) { + log.Println("GetInfo") + return &watcherapi.PluginInfo{ + Type: watcherapi.ResourcePlugin, + Name: m.resourceName, + Endpoint: m.endpoint, + SupportedVersions: []string{pluginapi.Version}}, nil +} + +// NotifyRegistrationStatus receives the registration notification from watcher +func (m *Stub) NotifyRegistrationStatus(ctx context.Context, status *watcherapi.RegistrationStatus) (*watcherapi.RegistrationStatusResponse, error) { + if m.registrationStatus != nil { + m.registrationStatus <- *status + } + if !status.PluginRegistered { + log.Printf("Registration failed: %v", status.Error) + } + return &watcherapi.RegistrationStatusResponse{}, nil +} + +// Register registers the resource plugin for the given resourceName with Kubelet. +func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir string) error { + if pluginSockDir != "" { + if _, err := os.Stat(pluginSockDir + "DEPRECATION"); err == nil { + log.Println("Deprecation file found. Skip registration.") + return nil + } + } + log.Println("Deprecation file not found. 
Invoke registration") + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + conn, err := grpc.DialContext(ctx, kubeletEndpoint, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", addr) + })) + if err != nil { + return err + } + defer conn.Close() + client := pluginapi.NewRegistrationClient(conn) + reqt := &pluginapi.RegisterRequest{ + Version: pluginapi.Version, + Endpoint: path.Base(m.socket), + ResourceName: resourceName, + Options: &pluginapi.ResourcePluginOptions{ + PreStartRequired: m.preStartContainerFlag, + }, + } + + _, err = client.Register(context.Background(), reqt) + if err != nil { + return err + } + return nil +} + +// GetResourcePluginOptions returns ResourcePluginOptions settings for the resource plugin. +func (m *Stub) GetResourcePluginOptions(ctx context.Context, e *pluginapi.Empty) (*pluginapi.ResourcePluginOptions, error) { + options := &pluginapi.ResourcePluginOptions{ + PreStartRequired: m.preStartContainerFlag, + } + return options, nil +} + +// PreStartContainer resets the resources received +func (m *Stub) PreStartContainer(ctx context.Context, r *pluginapi.PreStartContainerRequest) (*pluginapi.PreStartContainerResponse, error) { + log.Printf("PreStartContainer, %+v", r) + return &pluginapi.PreStartContainerResponse{}, nil +} + +// Allocate does a mock allocation +func (m *Stub) Allocate(ctx context.Context, r *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) { + log.Printf("Allocate, %+v", r) + + return m.allocFunc1(r) +} + +func (m *Stub) cleanup() error { + if err := os.Remove(m.socket); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// GetResourcesAllocation returns allocation results of corresponding resources +func (m *Stub) GetResourcesAllocation(ctx context.Context, r *pluginapi.GetResourcesAllocationRequest) 
(*pluginapi.GetResourcesAllocationResponse, error) { + log.Printf("GetResourcesAllocation, %+v", r) + return m.allocFunc2(r) +} + +// GetTopologyAwareResources returns allocation results of corresponding resources as topology aware format +func (m *Stub) GetTopologyAwareResources(ctx context.Context, r *pluginapi.GetTopologyAwareResourcesRequest) (*pluginapi.GetTopologyAwareResourcesResponse, error) { + log.Printf("GetTopologyAwareResources, %+v", r) + return &pluginapi.GetTopologyAwareResourcesResponse{}, nil +} + +// GetTopologyAwareResources returns corresponding allocatable resources as topology aware format +func (m *Stub) GetTopologyAwareAllocatableResources(ctx context.Context, r *pluginapi.GetTopologyAwareAllocatableResourcesRequest) (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) { + log.Printf("GetTopologyAwareAllocatableResources, %+v", r) + return &pluginapi.GetTopologyAwareAllocatableResourcesResponse{}, nil +} + +// GetTopologyHints returns hints of corresponding resources +func (m *Stub) GetTopologyHints(ctx context.Context, r *pluginapi.ResourceRequest) (*pluginapi.ResourceHintsResponse, error) { + log.Printf("GetTopologyHints, %+v", r) + return &pluginapi.ResourceHintsResponse{}, nil +} + +// Notify the resource plugin that the pod has beed deleted, +// and the plugin should do some clear-up work. +func (m *Stub) RemovePod(ctx context.Context, r *pluginapi.RemovePodRequest) (*pluginapi.RemovePodResponse, error) { + log.Printf("RemovePod, %+v", r) + return &pluginapi.RemovePodResponse{}, nil +} diff --git a/pkg/kubelet/cm/qosresourcemanager/topology_hints.go b/pkg/kubelet/cm/qosresourcemanager/topology_hints.go new file mode 100644 index 0000000000000..73f0ce1251821 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/topology_hints.go @@ -0,0 +1,173 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package qosresourcemanager + +import ( + "context" + "math" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/metrics" + maputil "k8s.io/kubernetes/pkg/util/maps" +) + +func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { + // [TODO] need to implement when applying pod scope affinity for qos resource manager + return nil +} + +// GetTopologyHints implements the TopologyManager HintProvider Interface which +// ensures the Resource Manager is consulted when Topology Aware Hints for each +// container are created. +func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { + if pod == nil || container == nil { + klog.Errorf("[qosresourcemanager] GetTopologyHints got nil pod: %v or container: %v", pod, container) + return nil + } + + if isSkippedPod(pod, true) { + klog.V(4).Infof("[qosresourcemanager] skip get topology hints for pod") + return nil + } + + containerType, containerIndex, err := GetContainerTypeAndIndex(pod, container) + + if err != nil { + klog.Errorf("[qosresourcemanager] GetContainerTypeAndIndex failed with error: %v", err) + return nil + } + + // Garbage collect any stranded resources before providing TopologyHints + m.UpdateAllocatedResources() + + // Loop through all resources and generate TopologyHints for them. 
+ resourceHints := make(map[string][]topologymanager.TopologyHint) + for resourceObj, requestedObj := range container.Resources.Requests { + reqResource := string(resourceObj) + requested := int(requestedObj.Value()) + + resource, err := m.getMappedResourceName(reqResource, container.Resources.Requests) + + if err != nil { + klog.Errorf("[qosresourcemanager] getMappedResourceName failed with error: %v", err) + return nil + } + + klog.Infof("[qosresourcemanager] pod: %s/%s container: %s needs %d %s, to get topology hint", + pod.Namespace, pod.Name, container.Name, requested, resource) + + // Only consider resources associated with a resource plugin. + if m.isResourcePluginResource(resource) && !requestedObj.IsZero() { + // Only consider resources that are actually with topology alignment + if aligned := m.resourceHasTopologyAlignment(resource); !aligned { + klog.Infof("[qosresourcemanager] resource '%v' does not have a topology preference", resource) + resourceHints[resource] = nil + continue + } + + // Short circuit to regenerate the same hints if there are already + // resources allocated to the Container. This might happen after a + // kubelet restart, for example. 
+ allocationInfo := m.podResources.containerResource(string(pod.UID), container.Name, resource) + if allocationInfo != nil && allocationInfo.ResourceHints != nil && len(allocationInfo.ResourceHints.Hints) > 0 { + + allocated := int(math.Ceil(allocationInfo.AllocatedQuantity)) + + if allocationInfo.IsScalarResource && allocated >= requested { + resourceHints[resource] = ParseListOfTopologyHints(allocationInfo.ResourceHints) + klog.Warningf("[qosresourcemanager] resource %s already allocated to (pod %s/%s, container %v) with larger number than request: requested: %d, allocated: %d; not to getTopologyHints", + resource, pod.GetNamespace(), pod.GetName(), container.Name, requested, allocated) + continue + } else { + klog.Warningf("[qosresourcemanager] resource %s already allocated to (pod %s/%s, container %v) with smaller number than request: requested: %d, allocated: %d; continue to getTopologyHints", + resource, pod.GetNamespace(), pod.GetName(), container.Name, requested, int(math.Ceil(allocationInfo.AllocatedQuantity))) + } + } + + startRPCTime := time.Now() + m.mutex.Lock() + eI, ok := m.endpoints[resource] + m.mutex.Unlock() + if !ok { + klog.Errorf("[qosresourcemanager] unknown Resource Plugin %s", resource) + resourceHints[resource] = []topologymanager.TopologyHint{} + continue + } + + klog.Infof("[qosresourcemanager] making GetTopologyHints request of %.3f resources %s for pod: %s/%s, container: %s", + ParseQuantityToFloat64(requestedObj), resource, pod.Namespace, pod.Name, container.Name) + + resourceReq := &pluginapi.ResourceRequest{ + PodUid: string(pod.GetUID()), + PodNamespace: pod.GetNamespace(), + PodName: pod.GetName(), + ContainerName: container.Name, + ContainerType: containerType, + ContainerIndex: containerIndex, + PodRole: pod.Labels[pluginapi.PodRoleLabelKey], + PodType: pod.Annotations[pluginapi.PodTypeAnnotationKey], + Labels: maputil.CopySS(pod.Labels), + Annotations: maputil.CopySS(pod.Annotations), + // use mapped resource name in 
"ResourceName" to indicates which endpoint to request + ResourceName: resource, + // use original requested resource name in "ResourceRequests" in order to make plugin identity real requested resource name + ResourceRequests: map[string]float64{reqResource: ParseQuantityToFloat64(requestedObj)}, + } + + resp, err := eI.e.getTopologyHints(context.Background(), resourceReq) + metrics.ResourcePluginGetTopologyHintsDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime)) + if err != nil { + klog.Errorf("[qosresourcemanager] call GetTopologyHints of %s resource plugin for pod: %s/%s, container: %s failed with error: %v", + resource, pod.GetNamespace(), pod.GetName(), container.Name, err) + + // empty TopologyHint list will cause fail in restricted topology manager policy + // nil TopologyHint list assumes no NUMA preference + resourceHints[resource] = []topologymanager.TopologyHint{} + continue + } + + // think about a resource name with accompanying resources, + // we must return union result of all accompanying resources in the resource name + resourceHints[resource] = ParseListOfTopologyHints(resp.ResourceHints[resource]) + + klog.Infof("[qosresourcemanager] GetTopologyHints for resource: %s, pod: %s/%s; container: %s, result: %+v", + resource, pod.Namespace, pod.Name, container.Name, resourceHints[resource]) + } + } + + return resourceHints +} + +func (m *ManagerImpl) resourceHasTopologyAlignment(resource string) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + eI, ok := m.endpoints[resource] + if !ok { + return false + } + + if eI.opts == nil || !eI.opts.WithTopologyAlignment { + klog.V(4).Infof("[qosresourcemanager] resource plugin options indicates that resource: %s without topology alignment", resource) + return false + } + + return true +} diff --git a/pkg/kubelet/cm/qosresourcemanager/topology_hints_test.go b/pkg/kubelet/cm/qosresourcemanager/topology_hints_test.go new file mode 100644 index 0000000000000..3866b2e4f440c --- /dev/null 
+++ b/pkg/kubelet/cm/qosresourcemanager/topology_hints_test.go @@ -0,0 +1,208 @@ +package qosresourcemanager + +import ( + "context" + "io/ioutil" + "os" + "reflect" + "sort" + "testing" + + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" +) + +type mockAffinityStore struct { + hint topologymanager.TopologyHint +} + +func (m *mockAffinityStore) GetAffinity(podUID string, containerName string) topologymanager.TopologyHint { + return m.hint +} + +func makeSocketMask(sockets ...int) bitmask.BitMask { + mask, _ := bitmask.NewBitMask(sockets...) + return mask +} + +// since topology allocation logic isn't performed in manager (decided only by qrm plugin) +// so topology hints testing is more like endpoint +// todo. since we don't have setGetTopologyHints for stub, so it's invalid to test allocation logic for now. 
+func TestGetTopologyHints(t *testing.T) { + p1, e1 := eSetup(t, "/tmp/mock_1", "mock_res") + defer eCleanup(t, p1, e1) + + e2 := endpointInfo{e: &MockEndpoint{topologyHints: func(ctx context.Context, resourceRequest *pluginapi.ResourceRequest) (*pluginapi.ResourceHintsResponse, error) { + resp := &pluginapi.ResourceHintsResponse{} + resp.ResourceHints = make(map[string]*pluginapi.ListOfTopologyHints) + resp.ResourceHints["mock_res_1"] = &pluginapi.ListOfTopologyHints{ + Hints: []*pluginapi.TopologyHint{ + {Nodes: []uint64{0}, Preferred: true}, + {Nodes: []uint64{1}, Preferred: true}, + {Nodes: []uint64{0, 1}, Preferred: false}, + }, + } + + return resp, nil + }}, opts: &pluginapi.ResourcePluginOptions{ + WithTopologyAlignment: true, + }} + + hint0, _ := bitmask.NewBitMask(0) + hint1, _ := bitmask.NewBitMask(1) + hint2, _ := bitmask.NewBitMask(0, 1) + + testCases := []struct { + description string + pod *v1.Pod + endpoints map[string]endpointInfo + expectedHints map[string][]topologymanager.TopologyHint + }{ + { + description: "skipped pod should not have hints", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {Kind: DaemonsetKind}, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName("mock_res_1"): resource.MustParse("2"), + }, + }, + }, + }, + }, + }, + }, + { + description: "resources without plugin should not have hints", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName("mock_res_1"): resource.MustParse("2"), + }, + }, + }, + }, + }, + }, + }, + { + description: "resources with zero request should not have hints", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName("mock_res_1"): resource.MustParse("0"), + }, + }, + }, + 
}, + }, + }, + endpoints: map[string]endpointInfo{"mock_res_1": {e: e1}}, + }, + { + description: "resources with request and endpoint should have hints", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName("mock_res_1"): resource.MustParse("2"), + }, + }, + }, + }, + }, + }, + endpoints: map[string]endpointInfo{"mock_res_1": e2}, + expectedHints: map[string][]topologymanager.TopologyHint{ + "mock_res_1": { + {NUMANodeAffinity: hint0, Preferred: true}, + {NUMANodeAffinity: hint1, Preferred: true}, + {NUMANodeAffinity: hint2, Preferred: false}, + }, + }, + }, + } + + for _, tc := range testCases { + m := ManagerImpl{ + podResources: newPodResourcesChk(), + sourcesReady: &sourcesReadyStub{}, + activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod} }, + endpoints: tc.endpoints, + } + + hints := m.GetTopologyHints(tc.pod, &tc.pod.Spec.Containers[0]) + for r := range tc.expectedHints { + sort.SliceStable(hints[r], func(i, j int) bool { + return hints[r][i].LessThan(hints[r][j]) + }) + sort.SliceStable(tc.expectedHints[r], func(i, j int) bool { + return tc.expectedHints[r][i].LessThan(tc.expectedHints[r][j]) + }) + if !reflect.DeepEqual(hints[r], tc.expectedHints[r]) { + t.Errorf("%v: Expected result to be %v, got %v", tc.description, tc.expectedHints[r], hints[r]) + } + } + } +} + +func TestResourceHasTopologyAlignment(t *testing.T) { + res1 := TestResource{ + resourceName: "domain1.com/resource1", + resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), + } + res2 := TestResource{ + resourceName: "domain2.com/resource2", + resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI), + } + testResources := make([]TestResource, 2) + testResources = append(testResources, res1) + testResources = append(testResources, res2) + as := require.New(t) + + testPods := []*v1.Pod{ + makePod("Pod0", v1.ResourceList{ + v1.ResourceName(res1.resourceName): 
res1.resourceQuantity}), + makePod("Pod1", v1.ResourceList{ + v1.ResourceName(res2.resourceName): res2.resourceQuantity}), + } + testPods[0].Spec.Containers[0].Name = "Cont0" + testPods[1].Spec.Containers[0].Name = "Cont1" + podsStub := activePodsStub{ + activePods: []*v1.Pod{}, + } + tmpDir, err := ioutil.TempDir("", "checkpoint") + as.Nil(err) + defer os.RemoveAll(tmpDir) + testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources) + as.Nil(err) + + //Doesn't have + testManager.endpoints[res1.resourceName].opts.WithTopologyAlignment = false + Alignment := testManager.resourceHasTopologyAlignment(res1.resourceName) + as.Equal(false, Alignment) + //Has + testManager.endpoints[res2.resourceName].opts.WithTopologyAlignment = true + Alignment = testManager.resourceHasTopologyAlignment(res2.resourceName) + as.Equal(true, Alignment) +} diff --git a/pkg/kubelet/cm/qosresourcemanager/types.go b/pkg/kubelet/cm/qosresourcemanager/types.go new file mode 100644 index 0000000000000..f3599ec8a0c9b --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/types.go @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package qosresourcemanager + +import ( + "time" + + v1 "k8s.io/api/core/v1" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" + "k8s.io/kubernetes/pkg/kubelet/status" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +// ActivePodsFunc is a function that returns a list of pods to reconcile. +type ActivePodsFunc func() []*v1.Pod + +type runtimeService interface { + UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error +} + +// Manager manages all the Resource Plugins running on a node. +type Manager interface { + // Start starts resource plugin registration service. + Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podstatusprovider status.PodStatusProvider, containerRuntime runtimeService) error + + // Allocate configures and assigns resources to a container in a pod. From + // the requested resources, Allocate will communicate with the + // owning resource plugin to allow setup procedures to take place, and for + // the resource plugin to provide runtime settings to use the resource + // (oci supported resource results, environment variables, annotations, ...). + Allocate(pod *v1.Pod, container *v1.Container) error + + // UpdatePluginResources updates node resources based on resources already + // allocated to pods. The node object is provided for the resource manager to + // update the node capacity to reflect the currently available resources. + UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error + + // Stop stops the manager. 
+ Stop() error + + // GetCapacity returns the amount of available resource plugin resource capacity, resource allocatable + // and inactive resource plugin resources previously registered on the node. + GetCapacity() (v1.ResourceList, v1.ResourceList, []string) + + // UpdateAllocatedResources frees any resources that are bound to terminated pods. + UpdateAllocatedResources() + + // TopologyManager HintProvider indicates the Resource Manager implements the Topology Manager Interface + // and is consulted to make Topology aware resource alignments in container scope. + GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint + + // TopologyManager HintProvider indicates the Resource Manager implements the Topology Manager Interface + // and is consulted to make Topology aware resource alignments in pod scope. + GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint + + // GetResourceRunContainerOptions checks whether we have cached containerResources + // for the passed-in container and returns its ResourceRunContainerOptions + // for the found one. An empty struct is returned in case no cached state is found. + GetResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) + + // GetTopologyAwareResources returns information about the resources assigned to pods and containers + // and organized as topology aware format. + GetTopologyAwareResources(pod *v1.Pod, container *v1.Container) (*pluginapi.GetTopologyAwareResourcesResponse, error) + + // GetTopologyAwareAllocatableResources returns information about the allocatable resources on the node + // organized as topology aware format. 
+ GetTopologyAwareAllocatableResources() (*pluginapi.GetTopologyAwareAllocatableResourcesResponse, error) + + // ShouldResetExtendedResourceCapacity returns whether the extended resources should be reset or not, + // depending on the checkpoint file availability. Absence of the checkpoint file strongly indicates + // the node has been recreated. + ShouldResetExtendedResourceCapacity() bool + + // support probe based plugin discovery mechanism in qos resource manager + GetWatcherHandler() cache.PluginHandler +} + +// TODO: evaluate whether we need these error definitions. +const ( + // errFailedToDialResourcePlugin is the error raised when the resource plugin could not be + // reached on the registered socket + errFailedToDialResourcePlugin = "failed to dial resource plugin:" + // errUnsupportedVersion is the error raised when the resource plugin uses an API version not + // supported by the Kubelet registry + errUnsupportedVersion = "requested API version %q is not supported by kubelet. Supported version is %q" + // errInvalidResourceName is the error raised when a resource plugin is registering + // itself with an invalid ResourceName + errInvalidResourceName = "the ResourceName %q is invalid" + // errEndpointStopped indicates that the endpoint has been stopped + errEndpointStopped = "endpoint %v has been stopped" + // errBadSocket is the error raised when the registry socket path is not absolute + errBadSocket = "bad socketPath, must be an absolute path:" + // errListenSocket is the error raised when the registry could not listen on the socket + errListenSocket = "failed to listen to socket while starting resource plugin registry, with error" +) + +// endpointStopGracePeriod indicates the grace period after an endpoint is stopped +// because its resource plugin fails. QoSResourceManager keeps the stopped endpoint in its +// cache during this grace period to cover the time gap for the capacity change to +// take effect. 
+const endpointStopGracePeriod = time.Duration(5) * time.Minute + +// kubeletQoSResourceManagerCheckpoint is the file name of resource plugin checkpoint +const kubeletQoSResourceManagerCheckpoint = "kubelet_qrm_checkpoint" diff --git a/pkg/kubelet/cm/qosresourcemanager/utils.go b/pkg/kubelet/cm/qosresourcemanager/utils.go new file mode 100644 index 0000000000000..879aa6eea4e47 --- /dev/null +++ b/pkg/kubelet/cm/qosresourcemanager/utils.go @@ -0,0 +1,252 @@ +package qosresourcemanager + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog/v2" + pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +// with highest precision 0.001 +func ParseQuantityToFloat64(quantity resource.Quantity) float64 { + return float64(quantity.MilliValue()) / 1000.0 +} + +func ParseTopologyManagerHint(hint topologymanager.TopologyHint) *pluginapi.TopologyHint { + var nodes []uint64 + + if hint.NUMANodeAffinity != nil { + bits := hint.NUMANodeAffinity.GetBits() + + for _, node := range bits { + nodes = append(nodes, uint64(node)) + } + } + + return &pluginapi.TopologyHint{ + Nodes: nodes, + Preferred: hint.Preferred, + } +} + +func ParseListOfTopologyHints(hintsList *pluginapi.ListOfTopologyHints) []topologymanager.TopologyHint { + if hintsList == nil { + return nil + } + + resultHints := make([]topologymanager.TopologyHint, 0, len(hintsList.Hints)) + + for _, hint := range hintsList.Hints { + if hint != nil { + + mask := bitmask.NewEmptyBitMask() + + for _, node := range hint.Nodes { + mask.Add(int(node)) + } + + resultHints = append(resultHints, topologymanager.TopologyHint{ + NUMANodeAffinity: mask, + Preferred: hint.Preferred, + }) + } + } + + return resultHints +} + +func IsInitContainerOfPod(pod *v1.Pod, container *v1.Container) bool { + if pod == nil || 
container == nil { + return false + } + + n := len(pod.Spec.InitContainers) + + for i := 0; i < n; i++ { + if pod.Spec.InitContainers[i].Name == container.Name { + return true + } + } + + return false +} + +func findContainerIDByName(status *v1.PodStatus, name string) (string, error) { + if status == nil { + return "", fmt.Errorf("findContainerIDByName got nil status") + } + + allStatuses := status.InitContainerStatuses + allStatuses = append(allStatuses, status.ContainerStatuses...) + for _, container := range allStatuses { + if container.Name == name && container.ContainerID != "" { + cid := &kubecontainer.ContainerID{} + err := cid.ParseString(container.ContainerID) + if err != nil { + return "", err + } + + return cid.ID, nil + } + } + return "", fmt.Errorf("unable to find ID for container with name %v in pod status (it may not be running)", name) +} + +func isDaemonPod(pod *v1.Pod) bool { + if pod == nil { + return false + } + + for i := 0; i < len(pod.OwnerReferences); i++ { + if pod.OwnerReferences[i].Kind == DaemonsetKind { + return true + } + } + + return false +} + +// [TODO]: to discuss use katalyst qos level or daemon label to skip pods +func isSkippedPod(pod *v1.Pod, isFirstAdmit bool) bool { + // [TODO](sunjianyu): consider other types of pods need to be skipped + if pod == nil { + return true + } + + if isFirstAdmit && IsPodSkipFirstAdmit(pod) { + return true + } + + return isDaemonPod(pod) && !IsPodKatalystQoSLevelSystemCores(pod) +} + +func isSkippedContainer(pod *v1.Pod, container *v1.Container) bool { + // [TODO](sunjianyu): + // 1. we skip init container currently and if needed we should implement reuse strategy later + // 2. 
consider other types of containers need to be skipped + containerType, _, err := GetContainerTypeAndIndex(pod, container) + + if err != nil { + klog.Errorf("GetContainerTypeAndIndex failed with error: %v", err) + return false + } + + return containerType == pluginapi.ContainerType_INIT +} + +func GetContainerTypeAndIndex(pod *v1.Pod, container *v1.Container) (containerType pluginapi.ContainerType, containerIndex uint64, err error) { + if pod == nil || container == nil { + err = fmt.Errorf("got nil pod: %v or container: %v", pod, container) + return + } + + foundContainer := false + + for i, initContainer := range pod.Spec.InitContainers { + if container.Name == initContainer.Name { + foundContainer = true + containerType = pluginapi.ContainerType_INIT + containerIndex = uint64(i) + break + } + } + + if !foundContainer { + mainContainerName := pod.Annotations[MainContainerNameAnnotationKey] + + if mainContainerName == "" && len(pod.Spec.Containers) > 0 { + mainContainerName = pod.Spec.Containers[0].Name + } + + for i, appContainer := range pod.Spec.Containers { + if container.Name == appContainer.Name { + foundContainer = true + + if container.Name == mainContainerName { + containerType = pluginapi.ContainerType_MAIN + } else { + containerType = pluginapi.ContainerType_SIDECAR + } + + containerIndex = uint64(i) + break + } + } + } + + if !foundContainer { + err = fmt.Errorf("GetContainerTypeAndIndex doesn't find container: %s in pod: %s/%s", container.Name, pod.Namespace, pod.Name) + } + + return +} + +func canSkipEndpointError(pod *v1.Pod, resource string) bool { + if pod == nil { + return false + } + + if IsPodKatalystQoSLevelReclaimedCores(pod) { + return false + } + + if IsPodKatalystQoSLevelDedicatedCores(pod) { + return false + } + + if IsPodKatalystQoSLevelSystemCores(pod) { + return false + } + + if IsPodKatalystQoSLevelSharedCores(pod) { + return true + } + + return false +} + +func IsPodKatalystQoSLevelDedicatedCores(pod *v1.Pod) bool { + if pod == nil { + 
return false + } + + return pod.Annotations[pluginapi.KatalystQoSLevelAnnotationKey] == pluginapi.KatalystQoSLevelDedicatedCores +} + +func IsPodKatalystQoSLevelSharedCores(pod *v1.Pod) bool { + if pod == nil { + return false + } + + return pod.Annotations[pluginapi.KatalystQoSLevelAnnotationKey] == pluginapi.KatalystQoSLevelSharedCores || + pod.Annotations[pluginapi.KatalystQoSLevelAnnotationKey] == "" +} + +func IsPodKatalystQoSLevelReclaimedCores(pod *v1.Pod) bool { + if pod == nil { + return false + } + + return pod.Annotations[pluginapi.KatalystQoSLevelAnnotationKey] == pluginapi.KatalystQoSLevelReclaimedCores +} + +func IsPodKatalystQoSLevelSystemCores(pod *v1.Pod) bool { + if pod == nil { + return false + } + + return pod.Annotations[pluginapi.KatalystQoSLevelAnnotationKey] == pluginapi.KatalystQoSLevelSystemCores +} + +func IsPodSkipFirstAdmit(pod *v1.Pod) bool { + if pod == nil { + return false + } + + return pod.Annotations[pluginapi.KatalystSkipQRMAdmitAnnotationKey] == pluginapi.KatalystValueTrue +} diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index a9be02a32d600..89ba48152cfea 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -46,6 +46,7 @@ type HandlerRunner interface { // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error) + GenerateResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*ResourceRunContainerOptions, error) GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host // of a pod. 
diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 0810e938f2675..74cb75da8a9be 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -424,6 +424,16 @@ type DeviceInfo struct { Permissions string } +// ResourceRunContainerOptions contains the combined container runtime settings to consume its allocated resources. +type ResourceRunContainerOptions struct { + // The environment variables list. + Envs []EnvVar + // The Annotations for the container + Annotations []Annotation + // OCI Linux container resources to applied for containers + Resources *runtimeapi.LinuxContainerResources +} + // RunContainerOptions specify the options which are necessary for running containers type RunContainerOptions struct { // The environment variables list. diff --git a/pkg/kubelet/container/testing/fake_runtime_helper.go b/pkg/kubelet/container/testing/fake_runtime_helper.go index a7d13d606cea4..d51108473cec9 100644 --- a/pkg/kubelet/container/testing/fake_runtime_helper.go +++ b/pkg/kubelet/container/testing/fake_runtime_helper.go @@ -17,7 +17,7 @@ limitations under the License. 
package testing import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" kubetypes "k8s.io/apimachinery/pkg/types" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -34,6 +34,19 @@ type FakeRuntimeHelper struct { Err error } +func (f *FakeRuntimeHelper) GenerateCreatePodResourceOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { + var opts kubecontainer.RunContainerOptions + if len(container.TerminationMessagePath) != 0 { + opts.PodContainerDir = f.PodContainerDir + } + return &opts, nil +} + +func (f *FakeRuntimeHelper) GenerateResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) { + var opts kubecontainer.ResourceRunContainerOptions + return &opts, nil +} + func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { var opts kubecontainer.RunContainerOptions if len(container.TerminationMessagePath) != 0 { diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index fdca83706ea36..6c366c5e4eb15 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1397,8 +1397,16 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { kl.containerLogManager.Start() // Adding Registration Callback function for CSI Driver kl.pluginManager.AddHandler(pluginwatcherapi.CSIPlugin, plugincache.PluginHandler(csi.PluginHandler)) - // Adding Registration Callback function for Device Manager - kl.pluginManager.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler()) + + // Adding Registration Callback function for handlers provided by Container Manager + for handlerType, handler := range kl.containerManager.GetPluginRegistrationHandler() { + if handler == nil { + klog.Warningf("handlerType: %s has nil handler, skip it", handlerType) + continue + } + 
kl.pluginManager.AddHandler(handlerType, handler) + } + // Start the plugin manager klog.V(4).InfoS("Starting plugin manager") go kl.pluginManager.Run(kl.sourcesReady, wait.NeverStop) @@ -2410,7 +2418,7 @@ func (kl *Kubelet) ListenAndServePodResources() { klog.V(2).InfoS("Failed to get local endpoint for PodResources endpoint", "err", err) return } - server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager, kl.containerManager, kl.containerManager) + server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager, kl.containerManager, kl.containerManager, kl.containerManager) } // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around. diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index aabb75bcd2e15..62569d3fe9c08 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -624,7 +624,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error { setters = append(setters, nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc), nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity, - kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent), + kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetResourcePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent), nodestatus.VersionInfo(kl.cadvisor.VersionInfo, kl.containerRuntime.Type, kl.containerRuntime.Version), nodestatus.DaemonEndpoints(kl.daemonEndpoints), nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList), diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 5f7adecf5e8c8..ebacdc503e5c5 100644 --- 
a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -2056,3 +2056,9 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { } return false } + +// GenerateRunContainerOptions generates the RunContainerOptions of resources allocated by QoSResourceManager, which can be used by +// the container runtime to set parameters for launching a container. +func (kl *Kubelet) GenerateResourceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*kubecontainer.ResourceRunContainerOptions, error) { + return kl.containerManager.GetResourceRunContainerOptions(pod, container) +} diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 680eab7a56e9f..cd58f84514588 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -354,7 +354,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai Value: e.Value, } } - config.Envs = envs + config.Envs = append(config.Envs, envs...) return config, cleanupAction, nil } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go index 25917803b1ce3..2ae67e38569b7 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go @@ -20,6 +20,7 @@ limitations under the License. package kuberuntime import ( + "fmt" "strconv" "time" @@ -39,20 +40,67 @@ import ( // applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig. 
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error { + + if config == nil { + return fmt.Errorf("applyPlatformSpecificContainerConfig met nil input config") + } + + if pod == nil || container == nil { + return fmt.Errorf("applyPlatformSpecificContainerConfig met nil pod or container") + } + enforceMemoryQoS := false // Set memory.min and memory.high if MemoryQoS enabled with cgroups v2 if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) && libcontainercgroups.IsCgroup2UnifiedMode() { enforceMemoryQoS = true } - config.Linux = m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget, enforceMemoryQoS) + + var opts *kubecontainer.ResourceRunContainerOptions + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.QoSResourceManager) { + var err error + opts, err = m.runtimeHelper.GenerateResourceRunContainerOptions(pod, container) + + if err != nil { + klog.Errorf("[applyPlatformSpecificContainerConfig] pod: %s/%s, containerName: %s GenerateResourceRunContainerOptions failed with error: %v", + pod.Namespace, pod.Name, container.Name, err) + return fmt.Errorf("GenerateResourceRunContainerOptions failed with error: %v", err) + } + + if config.Annotations == nil { + config.Annotations = make(map[string]string) + } + + if opts != nil { + for _, anno := range opts.Annotations { + config.Annotations[anno.Name] = anno.Value + } + + for _, env := range opts.Envs { + config.Envs = append(config.Envs, &runtimeapi.KeyValue{ + Key: env.Name, + Value: env.Value, + }) + } + } + } + + config.Linux = m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget, enforceMemoryQoS, opts, config.Annotations) return nil } // generateLinuxContainerConfig generates linux container config for kubelet runtime v1. 
-func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) *runtimeapi.LinuxContainerConfig { +func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool, opts *kubecontainer.ResourceRunContainerOptions, configAnnotations map[string]string) *runtimeapi.LinuxContainerConfig { + + resourceConfig := &runtimeapi.LinuxContainerResources{} + + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.QoSResourceManager) && opts != nil && opts.Resources != nil { + resourceConfig = opts.Resources + } + + // TODO(sunjianyu): consider if we should make results from qos resource manager override native action results? lc := &runtimeapi.LinuxContainerConfig{ - Resources: &runtimeapi.LinuxContainerResources{}, + Resources: resourceConfig, SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username), } @@ -62,7 +110,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C } // set linux container resources - lc.Resources = m.calculateLinuxResources(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), container.Resources.Limits.Memory()) + m.calculateLinuxResources(lc.Resources, container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), container.Resources.Limits.Memory()) lc.Resources.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container, int64(m.machineInfo.MemoryCapacity))) @@ -128,8 +176,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C } // calculateLinuxResources will create the linuxContainerResources type based on the provided CPU and memory resource requests, limits -func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity) 
*runtimeapi.LinuxContainerResources { - resources := runtimeapi.LinuxContainerResources{} +func (m *kubeGenericRuntimeManager) calculateLinuxResources(resources *runtimeapi.LinuxContainerResources, cpuRequest, cpuLimit, memoryLimit *resource.Quantity) { var cpuShares int64 memLimit := memoryLimit.Value() @@ -160,8 +207,6 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit resources.CpuQuota = cpuQuota resources.CpuPeriod = cpuPeriod } - - return &resources } // GetHugepageLimitsFromResources returns limits of each hugepages from resources. diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go index 29d0bbfc9b40c..c63ccf9de8f98 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go @@ -64,9 +64,11 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde Stdin: container.Stdin, StdinOnce: container.StdinOnce, Tty: container.TTY, - Linux: m.generateLinuxContainerConfig(container, pod, new(int64), "", nil, enforceMemoryQoS), - Envs: envs, + Linux: m.generateLinuxContainerConfig(container, pod, new(int64), "", nil, enforceMemoryQoS, nil, newContainerAnnotations(container, pod, restartCount, opts)), } + + expectedConfig.Envs = append(expectedConfig.Envs, envs...) 
+ return expectedConfig } @@ -215,7 +217,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) { }, } - linuxConfig := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false) + linuxConfig := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false, nil, nil) assert.Equal(t, test.expected.CpuPeriod, linuxConfig.GetResources().CpuPeriod, test.name) assert.Equal(t, test.expected.CpuQuota, linuxConfig.GetResources().CpuQuota, test.name) assert.Equal(t, test.expected.CpuShares, linuxConfig.GetResources().CpuShares, test.name) @@ -262,7 +264,8 @@ func TestCalculateLinuxResources(t *testing.T) { }, } for _, test := range tests { - linuxContainerResources := m.calculateLinuxResources(&test.cpuReq, &test.cpuLim, &test.memLim) + linuxContainerResources := &runtimeapi.LinuxContainerResources{} + m.calculateLinuxResources(linuxContainerResources, &test.cpuReq, &test.cpuLim, &test.memLim) assert.Equal(t, test.expected, linuxContainerResources) } } @@ -338,7 +341,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) { name: "Request128MBLimit256MB", pod: pod1, expected: &expectedResult{ - m.generateLinuxContainerConfig(&pod1.Spec.Containers[0], pod1, new(int64), "", nil, true), + m.generateLinuxContainerConfig(&pod1.Spec.Containers[0], pod1, new(int64), "", nil, true, nil, nil), 128 * 1024 * 1024, int64(float64(256*1024*1024) * m.memoryThrottlingFactor), }, @@ -347,7 +350,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) { name: "Request128MBWithoutLimit", pod: pod2, expected: &expectedResult{ - m.generateLinuxContainerConfig(&pod2.Spec.Containers[0], pod2, new(int64), "", nil, true), + m.generateLinuxContainerConfig(&pod2.Spec.Containers[0], pod2, new(int64), "", nil, true, nil, nil), 128 * 1024 * 1024, int64(pod2MemoryHigh), }, @@ -355,7 +358,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) { } for _, test := range tests { - linuxConfig 
:= m.generateLinuxContainerConfig(&test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true) + linuxConfig := m.generateLinuxContainerConfig(&test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true, nil, nil) assert.Equal(t, test.expected.containerConfig, linuxConfig, test.name) assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.min"], strconv.FormatInt(test.expected.memoryLow, 10), test.name) assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.high"], strconv.FormatInt(test.expected.memoryHigh, 10), test.name) @@ -578,7 +581,7 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - got := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false) + got := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false, nil, nil) if diff := cmp.Diff(tc.want, got.SecurityContext.NamespaceOptions); diff != "" { t.Errorf("%v: diff (-want +got):\n%v", t.Name(), diff) } @@ -669,7 +672,7 @@ func TestGenerateLinuxContainerConfigSwap(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { m.memorySwapBehavior = tc.swapSetting - actual := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", nil, false) + actual := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", nil, false, nil, nil) assert.Equal(t, tc.expected, actual.Resources.MemorySwapLimitInBytes, "memory swap config for %s", tc.name) }) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go index 646ee7f23cce9..7743953b4a095 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go @@ -33,15 +33,17 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod) // For overhead, we do not differentiate between requests and limits. 
Treat this overhead // as "guaranteed", with requests == limits - resources = m.calculateLinuxResources(cpu, cpu, memory) + m.calculateLinuxResources(resources, cpu, cpu, memory) } return resources } func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources { + resources := &runtimeapi.LinuxContainerResources{} req, lim := resourcehelper.PodRequestsAndLimitsWithoutOverhead(pod) - return m.calculateLinuxResources(req.Cpu(), lim.Cpu(), lim.Memory()) + m.calculateLinuxResources(resources, req.Cpu(), lim.Cpu(), lim.Memory()) + return resources } func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error { diff --git a/pkg/kubelet/metrics/metrics.go b/pkg/kubelet/metrics/metrics.go index 53b04c0589750..6cfc32e07d397 100644 --- a/pkg/kubelet/metrics/metrics.go +++ b/pkg/kubelet/metrics/metrics.go @@ -60,6 +60,7 @@ const ( // Metrics keys of device plugin operations DevicePluginRegistrationCountKey = "device_plugin_registration_total" DevicePluginAllocationDurationKey = "device_plugin_alloc_duration_seconds" + // Metrics keys of pod resources operations PodResourcesEndpointRequestsTotalKey = "pod_resources_endpoint_requests_total" PodResourcesEndpointRequestsListKey = "pod_resources_endpoint_requests_list" @@ -67,6 +68,11 @@ const ( PodResourcesEndpointErrorsListKey = "pod_resources_endpoint_errors_list" PodResourcesEndpointErrorsGetAllocatableKey = "pod_resources_endpoint_errors_get_allocatable" + // Metrics keys of resource plugin operations + ResourcePluginRegistrationCountKey = "resource_plugin_registration_total" + ResourcePluginAllocationDurationKey = "resource_plugin_alloc_duration_seconds" + ResourcePluginGetTopologyHintsDurationKey = "resource_plugin_get_hints_duration_seconds" + // Metrics keys for RuntimeClass RunPodSandboxDurationKey = "run_podsandbox_duration_seconds" RunPodSandboxErrorsKey = "run_podsandbox_errors_total" @@ -290,7 +296,41 @@ var ( }, 
[]string{"resource_name"}, ) - + // ResourcePluginRegistrationCount is a Counter that tracks the cumulative number of resource plugin registrations. + // Broken down by resource name. + ResourcePluginRegistrationCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: ResourcePluginRegistrationCountKey, + Help: "Cumulative number of resource plugin registrations. Broken down by resource name.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource_name"}, + ) + // ResourcePluginAllocationDuration is a Histogram that tracks the duration (in seconds) to serve a resource plugin allocation request. + // Broken down by resource name. + ResourcePluginAllocationDuration = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: KubeletSubsystem, + Name: ResourcePluginAllocationDurationKey, + Help: "Duration in seconds to serve a resource Allocation request. Broken down by resource name.", + Buckets: metrics.DefBuckets, + StabilityLevel: metrics.ALPHA, + }, + []string{"resource_name"}, + ) + // ResourcePluginGetTopologyHintsDuration is a Histogram that tracks the duration (in seconds) to serve a resource plugin GetTopologyHints request. + // Broken down by resource name. + ResourcePluginGetTopologyHintsDuration = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: KubeletSubsystem, + Name: ResourcePluginGetTopologyHintsDurationKey, + Help: "Duration in seconds to serve a resource GetTopologyHints request. Broken down by resource name.", + Buckets: metrics.DefBuckets, + StabilityLevel: metrics.ALPHA, + }, + []string{"resource_name"}, + ) // PodResourcesEndpointRequestsTotalCount is a Counter that tracks the cumulative number of requests to the PodResource endpoints. // Broken down by server API version. 
PodResourcesEndpointRequestsTotalCount = metrics.NewCounterVec( diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index 984cad5a619bb..deac29c76e0f3 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -232,6 +232,7 @@ func MachineInfo(nodeName string, machineInfoFunc func() (*cadvisorapiv1.MachineInfo, error), // typically Kubelet.GetCachedMachineInfo capacityFunc func() v1.ResourceList, // typically Kubelet.containerManager.GetCapacity devicePluginResourceCapacityFunc func() (v1.ResourceList, v1.ResourceList, []string), // typically Kubelet.containerManager.GetDevicePluginResourceCapacity + resourcePluginResourceCapacityFunc func() (v1.ResourceList, v1.ResourceList, []string), // typically Kubelet.containerManager.GetResourcePluginResourceCapacity nodeAllocatableReservationFunc func() v1.ResourceList, // typically Kubelet.containerManager.GetNodeAllocatableReservation recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent ) Setter { @@ -242,9 +243,9 @@ func MachineInfo(nodeName string, node.Status.Capacity = v1.ResourceList{} } - var devicePluginAllocatable v1.ResourceList - var devicePluginCapacity v1.ResourceList - var removedDevicePlugins []string + var devicePluginAllocatable, resourcePluginAllocatable v1.ResourceList + var devicePluginCapacity, resourcePluginCapacity v1.ResourceList + var removedDevicePlugins, removedResourcePlugins []string // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start // cAdvisor locally, e.g. for test-cmd.sh, and in integration test. 
@@ -292,6 +293,7 @@ func MachineInfo(nodeName string, } } + // Get allocatable, capacity and removed resources from device manager and set corresponding quantity in node status devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = devicePluginResourceCapacityFunc() for k, v := range devicePluginCapacity { if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() { @@ -312,6 +314,30 @@ func MachineInfo(nodeName string, // node status. node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI) } + + // Get allocatable, capacity and removed resources from qos resource manager and set corresponding quantity in node status + resourcePluginCapacity, resourcePluginAllocatable, removedResourcePlugins = resourcePluginResourceCapacityFunc() + if resourcePluginCapacity != nil { + for k, v := range resourcePluginCapacity { + if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() { + klog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) + } + node.Status.Capacity[k] = v + } + } + + for _, removedResource := range removedResourcePlugins { + klog.V(2).Infof("Set capacity for %s to 0 on resource removal", removedResource) + // Set the capacity of the removed resource to 0 instead of + // removing the resource from the node status. This is to indicate + // that the resource is managed by resource plugin and had been + // registered before. + // + // This is required to differentiate the resource plugin managed + // resources and the cluster-level resources, which are absent in + // node status. + node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI) + } } // Set Allocatable. 
@@ -345,6 +371,16 @@ func MachineInfo(nodeName string, } node.Status.Allocatable[k] = v } + + if resourcePluginAllocatable != nil { + for k, v := range resourcePluginAllocatable { + if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() { + klog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) + } + node.Status.Allocatable[k] = v + } + } + // for every huge page reservation, we need to remove it from allocatable memory for k, v := range node.Status.Capacity { if v1helper.IsHugePageResourceName(k) { diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go index da339907eb24f..2f04a61a98b12 100644 --- a/pkg/kubelet/nodestatus/setters_test.go +++ b/pkg/kubelet/nodestatus/setters_test.go @@ -528,17 +528,18 @@ func TestMachineInfo(t *testing.T) { } cases := []struct { - desc string - node *v1.Node - maxPods int - podsPerCore int - machineInfo *cadvisorapiv1.MachineInfo - machineInfoError error - capacity v1.ResourceList - devicePluginResourceCapacity dprc - nodeAllocatableReservation v1.ResourceList - expectNode *v1.Node - expectEvents []testEvent + desc string + node *v1.Node + maxPods int + podsPerCore int + machineInfo *cadvisorapiv1.MachineInfo + machineInfoError error + capacity v1.ResourceList + devicePluginResourceCapacity dprc + resourcePluginResourceCapacity dprc + nodeAllocatableReservation v1.ResourceList + expectNode *v1.Node + expectEvents []testEvent }{ { desc: "machine identifiers, basic capacity and allocatable", @@ -773,6 +774,39 @@ func TestMachineInfo(t *testing.T) { }, }, }, + { + desc: "resource plugin resources are reflected in capacity and allocatable", + node: &v1.Node{}, + maxPods: 110, + machineInfo: &cadvisorapiv1.MachineInfo{ + NumCores: 2, + MemoryCapacity: 1024, + }, + resourcePluginResourceCapacity: dprc{ + capacity: v1.ResourceList{ + "resource-plugin": *resource.NewQuantity(1, resource.BinarySI), + }, + allocatable: v1.ResourceList{ + "resource-plugin": 
*resource.NewQuantity(1, resource.BinarySI), + }, + }, + expectNode: &v1.Node{ + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + "resource-plugin": *resource.NewQuantity(1, resource.BinarySI), + }, + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + "resource-plugin": *resource.NewQuantity(1, resource.BinarySI), + }, + }, + }, + }, { desc: "inactive device plugin resources should have their capacity set to 0", node: &v1.Node{ @@ -807,6 +841,40 @@ func TestMachineInfo(t *testing.T) { }, }, }, + { + desc: "inactive resource plugin resources should have their capacity set to 0", + node: &v1.Node{ + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "inactive": *resource.NewQuantity(1, resource.BinarySI), + }, + }, + }, + maxPods: 110, + machineInfo: &cadvisorapiv1.MachineInfo{ + NumCores: 2, + MemoryCapacity: 1024, + }, + resourcePluginResourceCapacity: dprc{ + inactive: []string{"inactive"}, + }, + expectNode: &v1.Node{ + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + "inactive": *resource.NewQuantity(0, resource.BinarySI), + }, + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + "inactive": *resource.NewQuantity(0, resource.BinarySI), + }, + }, + }, + }, { desc: 
"extended resources not present in capacity are removed from allocatable", node: &v1.Node{ @@ -912,6 +980,10 @@ func TestMachineInfo(t *testing.T) { c := tc.devicePluginResourceCapacity return c.capacity, c.allocatable, c.inactive } + resourcePluginResourceCapacityFunc := func() (v1.ResourceList, v1.ResourceList, []string) { + c := tc.resourcePluginResourceCapacity + return c.capacity, c.allocatable, c.inactive + } nodeAllocatableReservationFunc := func() v1.ResourceList { return tc.nodeAllocatableReservation } @@ -926,7 +998,8 @@ func TestMachineInfo(t *testing.T) { } // construct setter setter := MachineInfo(nodeName, tc.maxPods, tc.podsPerCore, machineInfoFunc, capacityFunc, - devicePluginResourceCapacityFunc, nodeAllocatableReservationFunc, recordEventFunc) + devicePluginResourceCapacityFunc, resourcePluginResourceCapacityFunc, + nodeAllocatableReservationFunc, recordEventFunc) // call setter on node if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 127192e00f74d..6de9b72d17d76 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -193,10 +193,10 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer st } // ListenAndServePodResources initializes a gRPC server to serve the PodResources service -func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider, cpusProvider podresources.CPUsProvider, memoryProvider podresources.MemoryProvider) { +func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider, cpusProvider podresources.CPUsProvider, memoryProvider podresources.MemoryProvider, resourcesProvider podresources.ResourcesProvider) { server := grpc.NewServer() podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, 
podresources.NewV1alpha1PodResourcesServer(podsProvider, devicesProvider)) - podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(podsProvider, devicesProvider, cpusProvider, memoryProvider)) + podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(podsProvider, devicesProvider, cpusProvider, memoryProvider, resourcesProvider)) l, err := util.CreateListener(socket) if err != nil { klog.ErrorS(err, "Failed to create listener for podResources endpoint") diff --git a/staging/src/k8s.io/kubelet/config/v1beta1/types.go b/staging/src/k8s.io/kubelet/config/v1beta1/types.go index 2b397b9d34c11..b5f6b349746af 100644 --- a/staging/src/k8s.io/kubelet/config/v1beta1/types.go +++ b/staging/src/k8s.io/kubelet/config/v1beta1/types.go @@ -372,6 +372,13 @@ type KubeletConfiguration struct { // Default: "none" // +optional TopologyManagerPolicy string `json:"topologyManagerPolicy,omitempty"` + // QoS Resource Manager reconciliation period. + // Requires the QoSResourceManager feature gate to be enabled. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // shortening the period may carry a performance impact. + // Default: "3s" + // +optional + QoSResourceManagerReconcilePeriod metav1.Duration `json:"qosResourceManagerReconcilePeriod,omitempty"` // topologyManagerScope represents the scope of topology hint generation // that topology manager requests and hint providers generate. Valid values include: // @@ -382,6 +389,11 @@ type KubeletConfiguration struct { // Default: "container" // +optional TopologyManagerScope string `json:"topologyManagerScope,omitempty"` + // Map of resource name "A" to resource name "B" during QoS Resource Manager allocation period. + // It's useful for the same kind resource with different types. (eg. 
maps best-effort-cpu to cpu) + // Default: nil + // +optional + QoSResourceManagerResourceNamesMap map[string]string `json:"qosResourceManagerResourceNamesMap,omitempty"` // qosReserved is a set of resource name to percentage pairs that specify // the minimum percentage of a resource reserved for exclusive use by the // guaranteed QoS tier. diff --git a/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go index 0468c25e0511f..a5f7db6a75c62 100644 --- a/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go @@ -260,6 +260,14 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } } out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod + out.QoSResourceManagerReconcilePeriod = in.QoSResourceManagerReconcilePeriod + if in.QoSResourceManagerResourceNamesMap != nil { + in, out := &in.QoSResourceManagerResourceNamesMap, &out.QoSResourceManagerResourceNamesMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.QOSReserved != nil { in, out := &in.QOSReserved, &out.QOSReserved *out = make(map[string]string, len(*in)) diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 5b68822ef8279..c86f0d6427a4e 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -121,6 +121,7 @@ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag 
v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= diff --git a/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go b/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go index 7708f758fa2b9..5d6387af5576a 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go @@ -21,4 +21,6 @@ const ( CSIPlugin = "CSIPlugin" // DevicePlugin identifier for registered device plugins DevicePlugin = "DevicePlugin" + // ResourcePlugin identifier for registered resource plugins + ResourcePlugin = "ResourcePlugin" ) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go index ac0924b2b1661..d178c9be6197f 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.pb.go @@ -21,9 +21,11 @@ package v1 import ( context "context" + encoding_binary "encoding/binary" fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -84,11 +86,12 @@ var xxx_messageInfo_AllocatableResourcesRequest proto.InternalMessageInfo // AllocatableResourcesResponses contains informations about all the devices known by the kubelet type AllocatableResourcesResponse struct { - Devices []*ContainerDevices `protobuf:"bytes,1,rep,name=devices,proto3" json:"devices,omitempty"` - CpuIds []int64 `protobuf:"varint,2,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"` - Memory []*ContainerMemory 
`protobuf:"bytes,3,rep,name=memory,proto3" json:"memory,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + Devices []*ContainerDevices `protobuf:"bytes,1,rep,name=devices,proto3" json:"devices,omitempty"` + CpuIds []int64 `protobuf:"varint,2,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"` + Memory []*ContainerMemory `protobuf:"bytes,3,rep,name=memory,proto3" json:"memory,omitempty"` + Resources []*AllocatableTopologyAwareResource `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *AllocatableResourcesResponse) Reset() { *m = AllocatableResourcesResponse{} } @@ -144,6 +147,13 @@ func (m *AllocatableResourcesResponse) GetMemory() []*ContainerMemory { return nil } +func (m *AllocatableResourcesResponse) GetResources() []*AllocatableTopologyAwareResource { + if m != nil { + return m.Resources + } + return nil +} + // ListPodResourcesRequest is the request made to the PodResourcesLister service type ListPodResourcesRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -232,7 +242,11 @@ func (m *ListPodResourcesResponse) GetPodResources() []*PodResources { type PodResources struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Containers []*ContainerResources `protobuf:"bytes,3,rep,name=containers,proto3" json:"containers,omitempty"` + PodRole string `protobuf:"bytes,3,opt,name=pod_role,json=podRole,proto3" json:"pod_role,omitempty"` + PodType string `protobuf:"bytes,4,opt,name=pod_type,json=podType,proto3" json:"pod_type,omitempty"` + Containers []*ContainerResources `protobuf:"bytes,5,rep,name=containers,proto3" json:"containers,omitempty"` + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" 
protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,7,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_sizecache int32 `json:"-"` } @@ -283,6 +297,20 @@ func (m *PodResources) GetNamespace() string { return "" } +func (m *PodResources) GetPodRole() string { + if m != nil { + return m.PodRole + } + return "" +} + +func (m *PodResources) GetPodType() string { + if m != nil { + return m.PodType + } + return "" +} + func (m *PodResources) GetContainers() []*ContainerResources { if m != nil { return m.Containers @@ -290,14 +318,29 @@ func (m *PodResources) GetContainers() []*ContainerResources { return nil } +func (m *PodResources) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodResources) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + // ContainerResources contains information about the resources assigned to a container type ContainerResources struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Devices []*ContainerDevices `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"` - CpuIds []int64 `protobuf:"varint,3,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"` - Memory []*ContainerMemory `protobuf:"bytes,4,rep,name=memory,proto3" json:"memory,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Devices []*ContainerDevices `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"` + CpuIds []int64 `protobuf:"varint,3,rep,packed,name=cpu_ids,json=cpuIds,proto3" json:"cpu_ids,omitempty"` + Memory []*ContainerMemory 
`protobuf:"bytes,4,rep,name=memory,proto3" json:"memory,omitempty"` + Resources []*TopologyAwareResource `protobuf:"bytes,5,rep,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ContainerResources) Reset() { *m = ContainerResources{} } @@ -360,6 +403,13 @@ func (m *ContainerResources) GetMemory() []*ContainerMemory { return nil } +func (m *ContainerResources) GetResources() []*TopologyAwareResource { + if m != nil { + return m.Resources + } + return nil +} + // ContainerMemory contains information about memory and hugepages assigned to a container type ContainerMemory struct { MemoryType string `protobuf:"bytes,1,opt,name=memory_type,json=memoryType,proto3" json:"memory_type,omitempty"` @@ -484,6 +534,247 @@ func (m *ContainerDevices) GetTopology() *TopologyInfo { return nil } +// TopologyAwareResource contains information about the allocated resource information in topology aware format +type TopologyAwareResource struct { + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + IsNodeResource bool `protobuf:"varint,2,opt,name=is_node_resource,json=isNodeResource,proto3" json:"is_node_resource,omitempty"` + IsScalarResource bool `protobuf:"varint,3,opt,name=is_scalar_resource,json=isScalarResource,proto3" json:"is_scalar_resource,omitempty"` + AggregatedQuantity float64 `protobuf:"fixed64,4,opt,name=aggregated_quantity,json=aggregatedQuantity,proto3" json:"aggregated_quantity,omitempty"` + OriginalAggregatedQuantity float64 `protobuf:"fixed64,5,opt,name=original_aggregated_quantity,json=originalAggregatedQuantity,proto3" json:"original_aggregated_quantity,omitempty"` + TopologyAwareQuantityList []*TopologyAwareQuantity `protobuf:"bytes,6,rep,name=topology_aware_quantity_list,json=topologyAwareQuantityList,proto3" json:"topology_aware_quantity_list,omitempty"` + OriginalTopologyAwareQuantityList 
[]*TopologyAwareQuantity `protobuf:"bytes,7,rep,name=original_topology_aware_quantity_list,json=originalTopologyAwareQuantityList,proto3" json:"original_topology_aware_quantity_list,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyAwareResource) Reset() { *m = TopologyAwareResource{} } +func (*TopologyAwareResource) ProtoMessage() {} +func (*TopologyAwareResource) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{8} +} +func (m *TopologyAwareResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyAwareResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyAwareResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyAwareResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyAwareResource.Merge(m, src) +} +func (m *TopologyAwareResource) XXX_Size() int { + return m.Size() +} +func (m *TopologyAwareResource) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyAwareResource.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyAwareResource proto.InternalMessageInfo + +func (m *TopologyAwareResource) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *TopologyAwareResource) GetIsNodeResource() bool { + if m != nil { + return m.IsNodeResource + } + return false +} + +func (m *TopologyAwareResource) GetIsScalarResource() bool { + if m != nil { + return m.IsScalarResource + } + return false +} + +func (m *TopologyAwareResource) GetAggregatedQuantity() float64 { + if m != nil { + return m.AggregatedQuantity + } + return 0 +} + +func (m *TopologyAwareResource) GetOriginalAggregatedQuantity() float64 { + if m != nil { + return m.OriginalAggregatedQuantity + } + return 0 +} + +func (m 
*TopologyAwareResource) GetTopologyAwareQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareQuantityList + } + return nil +} + +func (m *TopologyAwareResource) GetOriginalTopologyAwareQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.OriginalTopologyAwareQuantityList + } + return nil +} + +// AllocatableTopologyAwareResource contains information about the allocatable resource information in topology aware format +type AllocatableTopologyAwareResource struct { + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + IsNodeResource bool `protobuf:"varint,2,opt,name=is_node_resource,json=isNodeResource,proto3" json:"is_node_resource,omitempty"` + IsScalarResource bool `protobuf:"varint,3,opt,name=is_scalar_resource,json=isScalarResource,proto3" json:"is_scalar_resource,omitempty"` + AggregatedAllocatableQuantity float64 `protobuf:"fixed64,4,opt,name=aggregated_allocatable_quantity,json=aggregatedAllocatableQuantity,proto3" json:"aggregated_allocatable_quantity,omitempty"` + TopologyAwareAllocatableQuantityList []*TopologyAwareQuantity `protobuf:"bytes,5,rep,name=topology_aware_allocatable_quantity_list,json=topologyAwareAllocatableQuantityList,proto3" json:"topology_aware_allocatable_quantity_list,omitempty"` + AggregatedCapacityQuantity float64 `protobuf:"fixed64,6,opt,name=aggregated_capacity_quantity,json=aggregatedCapacityQuantity,proto3" json:"aggregated_capacity_quantity,omitempty"` + TopologyAwareCapacityQuantityList []*TopologyAwareQuantity `protobuf:"bytes,7,rep,name=topology_aware_capacity_quantity_list,json=topologyAwareCapacityQuantityList,proto3" json:"topology_aware_capacity_quantity_list,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatableTopologyAwareResource) Reset() { *m = AllocatableTopologyAwareResource{} } +func (*AllocatableTopologyAwareResource) ProtoMessage() {} +func 
(*AllocatableTopologyAwareResource) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{9} +} +func (m *AllocatableTopologyAwareResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatableTopologyAwareResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AllocatableTopologyAwareResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AllocatableTopologyAwareResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatableTopologyAwareResource.Merge(m, src) +} +func (m *AllocatableTopologyAwareResource) XXX_Size() int { + return m.Size() +} +func (m *AllocatableTopologyAwareResource) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatableTopologyAwareResource.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatableTopologyAwareResource proto.InternalMessageInfo + +func (m *AllocatableTopologyAwareResource) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *AllocatableTopologyAwareResource) GetIsNodeResource() bool { + if m != nil { + return m.IsNodeResource + } + return false +} + +func (m *AllocatableTopologyAwareResource) GetIsScalarResource() bool { + if m != nil { + return m.IsScalarResource + } + return false +} + +func (m *AllocatableTopologyAwareResource) GetAggregatedAllocatableQuantity() float64 { + if m != nil { + return m.AggregatedAllocatableQuantity + } + return 0 +} + +func (m *AllocatableTopologyAwareResource) GetTopologyAwareAllocatableQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareAllocatableQuantityList + } + return nil +} + +func (m *AllocatableTopologyAwareResource) GetAggregatedCapacityQuantity() float64 { + if m != nil { + return m.AggregatedCapacityQuantity + } + return 0 +} + +func (m *AllocatableTopologyAwareResource) 
GetTopologyAwareCapacityQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareCapacityQuantityList + } + return nil +} + +type TopologyAwareQuantity struct { + ResourceValue float64 `protobuf:"fixed64,1,opt,name=resource_value,json=resourceValue,proto3" json:"resource_value,omitempty"` + Node uint64 `protobuf:"varint,2,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyAwareQuantity) Reset() { *m = TopologyAwareQuantity{} } +func (*TopologyAwareQuantity) ProtoMessage() {} +func (*TopologyAwareQuantity) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{10} +} +func (m *TopologyAwareQuantity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyAwareQuantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyAwareQuantity.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyAwareQuantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyAwareQuantity.Merge(m, src) +} +func (m *TopologyAwareQuantity) XXX_Size() int { + return m.Size() +} +func (m *TopologyAwareQuantity) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyAwareQuantity.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyAwareQuantity proto.InternalMessageInfo + +func (m *TopologyAwareQuantity) GetResourceValue() float64 { + if m != nil { + return m.ResourceValue + } + return 0 +} + +func (m *TopologyAwareQuantity) GetNode() uint64 { + if m != nil { + return m.Node + } + return 0 +} + // Topology describes hardware topology of the resource type TopologyInfo struct { Nodes []*NUMANode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` @@ -494,7 +785,7 @@ type TopologyInfo struct { func (m *TopologyInfo) Reset() { *m = 
TopologyInfo{} } func (*TopologyInfo) ProtoMessage() {} func (*TopologyInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{8} + return fileDescriptor_00212fb1f9d3bf1c, []int{11} } func (m *TopologyInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -540,7 +831,7 @@ type NUMANode struct { func (m *NUMANode) Reset() { *m = NUMANode{} } func (*NUMANode) ProtoMessage() {} func (*NUMANode) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{9} + return fileDescriptor_00212fb1f9d3bf1c, []int{12} } func (m *NUMANode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,9 +873,14 @@ func init() { proto.RegisterType((*ListPodResourcesRequest)(nil), "v1.ListPodResourcesRequest") proto.RegisterType((*ListPodResourcesResponse)(nil), "v1.ListPodResourcesResponse") proto.RegisterType((*PodResources)(nil), "v1.PodResources") + proto.RegisterMapType((map[string]string)(nil), "v1.PodResources.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "v1.PodResources.LabelsEntry") proto.RegisterType((*ContainerResources)(nil), "v1.ContainerResources") proto.RegisterType((*ContainerMemory)(nil), "v1.ContainerMemory") proto.RegisterType((*ContainerDevices)(nil), "v1.ContainerDevices") + proto.RegisterType((*TopologyAwareResource)(nil), "v1.TopologyAwareResource") + proto.RegisterType((*AllocatableTopologyAwareResource)(nil), "v1.AllocatableTopologyAwareResource") + proto.RegisterType((*TopologyAwareQuantity)(nil), "v1.TopologyAwareQuantity") proto.RegisterType((*TopologyInfo)(nil), "v1.TopologyInfo") proto.RegisterType((*NUMANode)(nil), "v1.NUMANode") } @@ -592,41 +888,67 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 539 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xed, 0xda, 0x21, 0x6d, 0xa6, 
0x29, 0x54, 0x0b, 0x22, 0x26, 0x4d, 0xdd, 0xc8, 0x5c, 0x22, - 0x01, 0xae, 0x1a, 0x04, 0xf7, 0xd2, 0x48, 0x28, 0x12, 0x8d, 0x60, 0x55, 0xae, 0x44, 0x8e, 0xbd, - 0x0d, 0x96, 0x12, 0xef, 0xe2, 0x5d, 0x47, 0x84, 0x13, 0x07, 0x3e, 0x80, 0x03, 0x67, 0xfe, 0x83, - 0x3f, 0xe8, 0x91, 0x23, 0x47, 0x1a, 0x7e, 0x04, 0xed, 0xda, 0x4e, 0x9d, 0x26, 0x01, 0xf5, 0xe4, - 0xd9, 0x79, 0x33, 0xb3, 0x6f, 0xe6, 0x8d, 0x17, 0x2a, 0x1e, 0x0f, 0x5d, 0x1e, 0x33, 0xc9, 0xb0, - 0x31, 0x39, 0xaa, 0x3f, 0x19, 0x86, 0xf2, 0x7d, 0x32, 0x70, 0x7d, 0x36, 0x3e, 0x1c, 0xb2, 0x21, - 0x3b, 0xd4, 0xd0, 0x20, 0x39, 0xd7, 0x27, 0x7d, 0xd0, 0x56, 0x9a, 0xe2, 0xec, 0xc3, 0xde, 0xf1, - 0x68, 0xc4, 0x7c, 0x4f, 0x7a, 0x83, 0x11, 0x25, 0x54, 0xb0, 0x24, 0xf6, 0xa9, 0x20, 0xf4, 0x43, - 0x42, 0x85, 0x74, 0xbe, 0x21, 0x68, 0xac, 0xc6, 0x05, 0x67, 0x91, 0xa0, 0xd8, 0x85, 0xcd, 0x80, - 0x4e, 0x42, 0x9f, 0x0a, 0x0b, 0x35, 0xcd, 0xd6, 0x76, 0xfb, 0x9e, 0x3b, 0x39, 0x72, 0x4f, 0x58, - 0x24, 0xbd, 0x30, 0xa2, 0x71, 0x27, 0xc5, 0x48, 0x1e, 0x84, 0x6b, 0xb0, 0xe9, 0xf3, 0xa4, 0x1f, - 0x06, 0xc2, 0x32, 0x9a, 0x66, 0xcb, 0x24, 0x65, 0x9f, 0x27, 0xdd, 0x40, 0xe0, 0x47, 0x50, 0x1e, - 0xd3, 0x31, 0x8b, 0xa7, 0x96, 0xa9, 0xeb, 0xdc, 0x5d, 0xa8, 0x73, 0xaa, 0x21, 0x92, 0x85, 0x38, - 0x0f, 0xa0, 0xf6, 0x2a, 0x14, 0xf2, 0x35, 0x0b, 0x96, 0x18, 0xbf, 0x01, 0x6b, 0x19, 0xca, 0xc8, - 0x3e, 0x83, 0x1d, 0xce, 0x82, 0x7e, 0x9c, 0x03, 0x19, 0xe5, 0x5d, 0x75, 0xd5, 0x42, 0x42, 0x95, - 0x17, 0x4e, 0xce, 0x47, 0xa8, 0x16, 0x51, 0x8c, 0xa1, 0x14, 0x79, 0x63, 0x6a, 0xa1, 0x26, 0x6a, - 0x55, 0x88, 0xb6, 0x71, 0x03, 0x2a, 0xea, 0x2b, 0xb8, 0xe7, 0x53, 0xcb, 0xd0, 0xc0, 0x95, 0x03, - 0x3f, 0x07, 0xf0, 0xf3, 0x56, 0x44, 0xd6, 0xe0, 0xfd, 0x85, 0x06, 0xaf, 0xee, 0x2e, 0x44, 0x3a, - 0xdf, 0x11, 0xe0, 0xe5, 0x90, 0x95, 0x04, 0x0a, 0x42, 0x18, 0x37, 0x14, 0xc2, 0x5c, 0x23, 0x44, - 0xe9, 0xff, 0x42, 0x48, 0xb8, 0x73, 0x0d, 0xc2, 0x07, 0xb0, 0x9d, 0x82, 0x7d, 0x39, 0xe5, 0x39, - 0x47, 0x48, 0x5d, 0x67, 0x53, 0x4e, 0x15, 0x7b, 0x11, 0x7e, 0x4a, 0xa7, 
0x54, 0x22, 0xda, 0xc6, - 0x8f, 0x61, 0x4b, 0x32, 0xce, 0x46, 0x6c, 0xa8, 0xf4, 0x47, 0xb9, 0x28, 0x67, 0x99, 0xaf, 0x1b, - 0x9d, 0x33, 0x32, 0x8f, 0x70, 0xbe, 0x20, 0xd8, 0xbd, 0xde, 0x19, 0x7e, 0x08, 0x3b, 0xb9, 0xb0, - 0xfd, 0xc2, 0x74, 0xaa, 0xb9, 0xb3, 0xa7, 0xa6, 0xb4, 0x0f, 0x90, 0x0e, 0x60, 0xbe, 0x81, 0x15, - 0x52, 0x49, 0x3d, 0xaa, 0xf7, 0x9b, 0xd1, 0x68, 0x43, 0xb5, 0x88, 0x60, 0x07, 0x6e, 0x45, 0x2c, - 0x98, 0xaf, 0x55, 0x55, 0xa5, 0xf6, 0xde, 0x9e, 0x1e, 0xf7, 0x58, 0x40, 0x49, 0x0a, 0x39, 0x75, - 0xd8, 0xca, 0x5d, 0xf8, 0x36, 0x18, 0xdd, 0x8e, 0xa6, 0x69, 0x12, 0xa3, 0xdb, 0x69, 0xff, 0x40, - 0x80, 0x8b, 0x8b, 0xa6, 0xf6, 0x98, 0xc6, 0xf8, 0x04, 0x4a, 0xca, 0xc2, 0x7b, 0xaa, 0xde, 0x9a, - 0xb5, 0xaf, 0x37, 0x56, 0x83, 0xe9, 0xe2, 0x3b, 0x1b, 0xf8, 0x1d, 0xd4, 0x5e, 0x52, 0xb9, 0xea, - 0x57, 0xc6, 0x07, 0x2a, 0xf5, 0x1f, 0x8f, 0x40, 0xbd, 0xb9, 0x3e, 0x20, 0xaf, 0xff, 0xa2, 0x71, - 0x71, 0x69, 0xa3, 0x5f, 0x97, 0xf6, 0xc6, 0xe7, 0x99, 0x8d, 0x2e, 0x66, 0x36, 0xfa, 0x39, 0xb3, - 0xd1, 0xef, 0x99, 0x8d, 0xbe, 0xfe, 0xb1, 0x37, 0x06, 0x65, 0xfd, 0xd8, 0x3c, 0xfd, 0x1b, 0x00, - 0x00, 0xff, 0xff, 0x43, 0x46, 0x5d, 0x7f, 0xac, 0x04, 0x00, 0x00, + // 952 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x36, 0x45, 0xfd, 0x58, 0x63, 0xc5, 0x15, 0x36, 0x69, 0x4d, 0x2b, 0xb2, 0xac, 0xb0, 0x31, + 0x20, 0xa0, 0xa9, 0x8c, 0xb8, 0xff, 0x3d, 0x14, 0x51, 0xec, 0xb6, 0x30, 0x90, 0x18, 0x0d, 0xeb, + 0xf6, 0xd0, 0x43, 0x89, 0x15, 0xb9, 0x51, 0x09, 0xd3, 0x5c, 0x9a, 0xbb, 0x72, 0xa1, 0x9e, 0x7a, + 0xe8, 0xa5, 0xb7, 0x3e, 0x44, 0x5f, 0xa2, 0x6f, 0x90, 0x63, 0x6e, 0xcd, 0xb1, 0x71, 0x5f, 0xa4, + 0xd8, 0x5d, 0xf1, 0x47, 0x24, 0x55, 0xd9, 0xa7, 0x9c, 0xb4, 0x9c, 0xf9, 0x66, 0xe6, 0xdb, 0x6f, + 0x66, 0x57, 0x0b, 0x4d, 0x1c, 0x7a, 0xc3, 0x30, 0xa2, 0x9c, 0xa2, 0xca, 0xe5, 0xc3, 0xce, 0xfb, + 0x13, 0x8f, 0xff, 0x34, 0x1d, 0x0f, 0x1d, 0x7a, 0xbe, 0x3f, 0xa1, 0x13, 0xba, 0x2f, 0x5d, 
0xe3, + 0xe9, 0x73, 0xf9, 0x25, 0x3f, 0xe4, 0x4a, 0x85, 0x98, 0x3b, 0x70, 0x77, 0xe4, 0xfb, 0xd4, 0xc1, + 0x1c, 0x8f, 0x7d, 0x62, 0x11, 0x46, 0xa7, 0x91, 0x43, 0x98, 0x45, 0x2e, 0xa6, 0x84, 0x71, 0xf3, + 0x95, 0x06, 0xdd, 0x72, 0x3f, 0x0b, 0x69, 0xc0, 0x08, 0x1a, 0x42, 0xc3, 0x25, 0x97, 0x9e, 0x43, + 0x98, 0xa1, 0xf5, 0xf5, 0xc1, 0xc6, 0xc1, 0x9d, 0xe1, 0xe5, 0xc3, 0xe1, 0x21, 0x0d, 0x38, 0xf6, + 0x02, 0x12, 0x1d, 0x29, 0x9f, 0x15, 0x83, 0xd0, 0x16, 0x34, 0x9c, 0x70, 0x6a, 0x7b, 0x2e, 0x33, + 0x2a, 0x7d, 0x7d, 0xa0, 0x5b, 0x75, 0x27, 0x9c, 0x1e, 0xbb, 0x0c, 0xbd, 0x07, 0xf5, 0x73, 0x72, + 0x4e, 0xa3, 0x99, 0xa1, 0xcb, 0x3c, 0xb7, 0x17, 0xf2, 0x3c, 0x95, 0x2e, 0x6b, 0x0e, 0x41, 0x8f, + 0xa1, 0x19, 0xc5, 0x54, 0x8c, 0xaa, 0xc4, 0xdf, 0x17, 0xf8, 0x0c, 0xd5, 0x53, 0x1a, 0x52, 0x9f, + 0x4e, 0x66, 0xa3, 0x9f, 0x71, 0x94, 0xf0, 0xb6, 0xd2, 0x30, 0x73, 0x1b, 0xb6, 0x9e, 0x78, 0x8c, + 0x7f, 0x43, 0xdd, 0xc2, 0xae, 0x9f, 0x81, 0x51, 0x74, 0xcd, 0x37, 0xfc, 0x11, 0xdc, 0x0a, 0xa9, + 0x6b, 0xa7, 0xe5, 0xd5, 0xb6, 0xdb, 0xa2, 0xfc, 0x42, 0x40, 0x2b, 0xcc, 0x7c, 0x99, 0x7f, 0xea, + 0xd0, 0xca, 0xba, 0x11, 0x82, 0x6a, 0x80, 0xcf, 0x89, 0xa1, 0xf5, 0xb5, 0x41, 0xd3, 0x92, 0x6b, + 0xd4, 0x85, 0xa6, 0xf8, 0x65, 0x21, 0x76, 0x88, 0x51, 0x91, 0x8e, 0xd4, 0x80, 0xb6, 0x61, 0x5d, + 0x56, 0xa6, 0x3e, 0x31, 0x74, 0xe9, 0x6c, 0x88, 0x12, 0xd4, 0x4f, 0x5c, 0x7c, 0x16, 0x12, 0xa3, + 0x9a, 0xb8, 0x4e, 0x67, 0x21, 0x41, 0x1f, 0x03, 0x38, 0xb1, 0x8a, 0xcc, 0xa8, 0x49, 0xb2, 0xef, + 0x2c, 0x68, 0x9b, 0x52, 0xce, 0x20, 0xd1, 0x87, 0x50, 0xf7, 0xf1, 0x98, 0xf8, 0xcc, 0xa8, 0xcb, + 0x98, 0x6e, 0x7e, 0x83, 0xc3, 0x27, 0xd2, 0xfd, 0x65, 0xc0, 0x45, 0x63, 0x14, 0x16, 0x1d, 0xc2, + 0x06, 0x0e, 0x02, 0xca, 0x31, 0xf7, 0x68, 0xc0, 0x8c, 0x86, 0x0c, 0xbd, 0x57, 0x08, 0x1d, 0xa5, + 0x18, 0x15, 0x9f, 0x8d, 0xea, 0x7c, 0x06, 0x1b, 0x99, 0xdc, 0xa8, 0x0d, 0xfa, 0x19, 0x99, 0xcd, + 0x85, 0x12, 0x4b, 0x74, 0x07, 0x6a, 0x97, 0xd8, 0x9f, 0xc6, 0x1a, 0xa9, 0x8f, 0xcf, 0x2b, 0x9f, + 0x6a, 0x9d, 0x2f, 0xa0, 0x9d, 
0xcf, 0x7d, 0x93, 0x78, 0xf3, 0x6f, 0x0d, 0x50, 0x51, 0x98, 0xd2, + 0x66, 0x65, 0x26, 0xbf, 0x72, 0xc3, 0xc9, 0xd7, 0x97, 0x4c, 0x7e, 0x75, 0xf5, 0xe4, 0x7f, 0x92, + 0x9d, 0x7c, 0xd5, 0xcd, 0x6d, 0x81, 0x5f, 0x39, 0xee, 0x1c, 0xde, 0xca, 0xe5, 0x44, 0xbb, 0xb0, + 0xa1, 0xb2, 0xaa, 0xc1, 0x51, 0x9b, 0x03, 0x65, 0x92, 0xb3, 0x83, 0xa0, 0xca, 0xbc, 0x5f, 0x94, + 0x4c, 0x55, 0x4b, 0xae, 0xd1, 0x03, 0x58, 0xe7, 0xf3, 0x5a, 0x72, 0x0a, 0xe7, 0xa3, 0x1f, 0xd7, + 0x3f, 0x0e, 0x9e, 0x53, 0x2b, 0x41, 0x98, 0xbf, 0x69, 0xd0, 0xce, 0x4b, 0x82, 0xde, 0x85, 0x5b, + 0x31, 0x2f, 0x3b, 0x23, 0x6b, 0x2b, 0x36, 0x9e, 0x08, 0x79, 0x77, 0x00, 0x94, 0x72, 0xc9, 0x5d, + 0xd1, 0xb4, 0x9a, 0xca, 0x22, 0x44, 0xbb, 0x19, 0x8d, 0x97, 0x3a, 0xbc, 0x5d, 0xaa, 0xd0, 0xf5, + 0xb8, 0x0c, 0xa0, 0xed, 0x31, 0x3b, 0xa0, 0x2e, 0x49, 0xce, 0xbd, 0xd4, 0x64, 0xdd, 0xda, 0xf4, + 0xd8, 0x09, 0x75, 0xd3, 0x74, 0x0f, 0x00, 0x79, 0xcc, 0x66, 0x0e, 0xf6, 0x71, 0x94, 0x62, 0x75, + 0x89, 0x6d, 0x7b, 0xec, 0x5b, 0xe9, 0x48, 0xd0, 0xfb, 0x70, 0x1b, 0x4f, 0x26, 0x11, 0x99, 0x60, + 0x4e, 0x5c, 0xfb, 0x62, 0x8a, 0x03, 0xee, 0xf1, 0x99, 0x3c, 0xc1, 0x9a, 0x85, 0x52, 0xd7, 0xb3, + 0xb9, 0x07, 0x3d, 0x82, 0x2e, 0x8d, 0xbc, 0x89, 0x17, 0x60, 0xdf, 0x2e, 0x8b, 0xac, 0xc9, 0xc8, + 0x4e, 0x8c, 0x19, 0x15, 0x33, 0xfc, 0x00, 0xdd, 0x58, 0x15, 0x1b, 0x0b, 0x25, 0x92, 0x60, 0xdb, + 0xf7, 0x18, 0x9f, 0x1f, 0xf6, 0xe2, 0x48, 0xc5, 0x09, 0xac, 0x6d, 0x5e, 0x66, 0x16, 0xd7, 0x25, + 0x3a, 0x83, 0xbd, 0x84, 0xdd, 0xff, 0x16, 0x69, 0xac, 0x2a, 0x72, 0x2f, 0xce, 0x73, 0xba, 0xac, + 0x98, 0xf9, 0x7b, 0x15, 0xfa, 0xab, 0xae, 0xfb, 0x37, 0xdb, 0xdd, 0xaf, 0x60, 0x37, 0xd3, 0x23, + 0x9c, 0x72, 0xcd, 0x77, 0x7a, 0x27, 0x85, 0x65, 0x76, 0x94, 0xb4, 0xec, 0x02, 0x06, 0x39, 0x35, + 0xcb, 0x72, 0x29, 0x65, 0x6b, 0xab, 0x94, 0xbd, 0xbf, 0xd0, 0xbe, 0x92, 0x72, 0xb2, 0x93, 0x8f, + 0xa0, 0x9b, 0xa1, 0xee, 0xe0, 0x10, 0x3b, 0xa2, 0x44, 0xc2, 0xbb, 0xae, 0xe6, 0x2c, 0xc5, 0x1c, + 0xce, 0x21, 0x09, 0xe9, 0x33, 0xd8, 0xcb, 0x91, 0x2e, 0x64, 0xb9, 
0xee, 0x2c, 0x2c, 0x30, 0xce, + 0x17, 0x92, 0xb3, 0x60, 0xe5, 0x4e, 0x77, 0xc2, 0x62, 0x0f, 0x36, 0x93, 0xfe, 0xab, 0x1b, 0x5f, + 0x93, 0xcc, 0x93, 0xa9, 0xf8, 0x5e, 0x18, 0xe5, 0xf5, 0x4e, 0xdd, 0xe4, 0x9e, 0x13, 0x6b, 0xf3, + 0x00, 0x5a, 0xd9, 0xcb, 0x04, 0x99, 0x50, 0x13, 0xf6, 0xf8, 0xff, 0xbe, 0x25, 0x08, 0x9f, 0x7c, + 0xf7, 0x74, 0x24, 0x07, 0x44, 0xb9, 0xcc, 0x0e, 0xac, 0xc7, 0x26, 0xb4, 0x09, 0x95, 0xe3, 0x23, + 0x59, 0x4e, 0xb7, 0x2a, 0xc7, 0x47, 0x07, 0x7f, 0x69, 0x80, 0xb2, 0xff, 0x81, 0x82, 0x38, 0x89, + 0xd0, 0x21, 0x54, 0xa5, 0xe2, 0x77, 0x45, 0xbe, 0x25, 0xef, 0x91, 0x4e, 0xb7, 0xdc, 0xa9, 0x5e, + 0x24, 0xe6, 0x1a, 0xfa, 0x11, 0xb6, 0xbe, 0x26, 0xbc, 0xec, 0x9d, 0x86, 0x76, 0x73, 0xcf, 0xa2, + 0x42, 0xee, 0xfe, 0x72, 0x40, 0x9c, 0xff, 0x71, 0xf7, 0xc5, 0xeb, 0x9e, 0xf6, 0xea, 0x75, 0x6f, + 0xed, 0xd7, 0xab, 0x9e, 0xf6, 0xe2, 0xaa, 0xa7, 0xbd, 0xbc, 0xea, 0x69, 0xff, 0x5c, 0xf5, 0xb4, + 0x3f, 0xfe, 0xed, 0xad, 0x8d, 0xeb, 0xf2, 0x25, 0xf9, 0xc1, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xc9, 0x8e, 0xe6, 0x8f, 0x89, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -788,6 +1110,20 @@ func (m *AllocatableResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, e _ = i var l int _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Memory) > 0 { for iNdEx := len(m.Memory) - 1; iNdEx >= 0; iNdEx-- { { @@ -918,6 +1254,44 @@ func (m *PodResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } if len(m.Containers) > 0 { for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { { @@ -929,9 +1303,23 @@ func (m *PodResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintApi(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2a } } + if len(m.PodType) > 0 { + i -= len(m.PodType) + copy(dAtA[i:], m.PodType) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodType))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodRole) > 0 { + i -= len(m.PodRole) + copy(dAtA[i:], m.PodRole) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodRole))) + i-- + dAtA[i] = 0x1a + } if len(m.Namespace) > 0 { i -= len(m.Namespace) 
copy(dAtA[i:], m.Namespace) @@ -969,6 +1357,20 @@ func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Memory) > 0 { for iNdEx := len(m.Memory) - 1; iNdEx >= 0; iNdEx-- { { @@ -1124,7 +1526,7 @@ func (m *ContainerDevices) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TopologyInfo) Marshal() (dAtA []byte, err error) { +func (m *TopologyAwareResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1134,20 +1536,20 @@ func (m *TopologyInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TopologyInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *TopologyAwareResource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TopologyInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TopologyAwareResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Nodes) > 0 { - for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + if len(m.OriginalTopologyAwareQuantityList) > 0 { + for iNdEx := len(m.OriginalTopologyAwareQuantityList) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.OriginalTopologyAwareQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1155,13 +1557,227 @@ func (m *TopologyInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintApi(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x3a + } + } + if len(m.TopologyAwareQuantityList) > 0 { + for iNdEx := 
len(m.TopologyAwareQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.OriginalAggregatedQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.OriginalAggregatedQuantity)))) + i-- + dAtA[i] = 0x29 + } + if m.AggregatedQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AggregatedQuantity)))) + i-- + dAtA[i] = 0x21 + } + if m.IsScalarResource { + i-- + if m.IsScalarResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IsNodeResource { + i-- + if m.IsNodeResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *NUMANode) Marshal() (dAtA []byte, err error) { +func (m *AllocatableTopologyAwareResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatableTopologyAwareResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatableTopologyAwareResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TopologyAwareCapacityQuantityList) > 0 { + for iNdEx := len(m.TopologyAwareCapacityQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareCapacityQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.AggregatedCapacityQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AggregatedCapacityQuantity)))) + i-- + dAtA[i] = 0x31 + } + if len(m.TopologyAwareAllocatableQuantityList) > 0 { + for iNdEx := len(m.TopologyAwareAllocatableQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareAllocatableQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.AggregatedAllocatableQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AggregatedAllocatableQuantity)))) + i-- + dAtA[i] = 0x21 + } + if m.IsScalarResource { + i-- + if m.IsScalarResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IsNodeResource { + i-- + if m.IsNodeResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TopologyAwareQuantity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyAwareQuantity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyAwareQuantity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Node != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Node)) + i-- + dAtA[i] = 0x10 + } + if m.ResourceValue != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], 
uint64(math.Float64bits(float64(m.ResourceValue)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *TopologyInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NUMANode) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1234,6 +1850,12 @@ func (m *AllocatableResourcesResponse) Size() (n int) { n += 1 + l + sovApi(uint64(l)) } } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } return n } @@ -1275,12 +1897,36 @@ func (m *PodResources) Size() (n int) { if l > 0 { n += 1 + l + sovApi(uint64(l)) } + l = len(m.PodRole) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodType) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } if len(m.Containers) > 0 { for _, e := range m.Containers { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) 
+ 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } return n } @@ -1313,6 +1959,12 @@ func (m *ContainerResources) Size() (n int) { n += 1 + l + sovApi(uint64(l)) } } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } return n } @@ -1359,6 +2011,95 @@ func (m *ContainerDevices) Size() (n int) { return n } +func (m *TopologyAwareResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.IsNodeResource { + n += 2 + } + if m.IsScalarResource { + n += 2 + } + if m.AggregatedQuantity != 0 { + n += 9 + } + if m.OriginalAggregatedQuantity != 0 { + n += 9 + } + if len(m.TopologyAwareQuantityList) > 0 { + for _, e := range m.TopologyAwareQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.OriginalTopologyAwareQuantityList) > 0 { + for _, e := range m.OriginalTopologyAwareQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *AllocatableTopologyAwareResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.IsNodeResource { + n += 2 + } + if m.IsScalarResource { + n += 2 + } + if m.AggregatedAllocatableQuantity != 0 { + n += 9 + } + if len(m.TopologyAwareAllocatableQuantityList) > 0 { + for _, e := range m.TopologyAwareAllocatableQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.AggregatedCapacityQuantity != 0 { + n += 9 + } + if len(m.TopologyAwareCapacityQuantityList) > 0 { + for _, e := range m.TopologyAwareCapacityQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *TopologyAwareQuantity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceValue != 0 { + n += 9 + } + if m.Node != 0 { + 
n += 1 + sovApi(uint64(m.Node)) + } + return n +} + func (m *TopologyInfo) Size() (n int) { if m == nil { return 0 @@ -1415,10 +2156,16 @@ func (this *AllocatableResourcesResponse) String() string { repeatedStringForMemory += strings.Replace(f.String(), "ContainerMemory", "ContainerMemory", 1) + "," } repeatedStringForMemory += "}" + repeatedStringForResources := "[]*AllocatableTopologyAwareResource{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(f.String(), "AllocatableTopologyAwareResource", "AllocatableTopologyAwareResource", 1) + "," + } + repeatedStringForResources += "}" s := strings.Join([]string{`&AllocatableResourcesResponse{`, `Devices:` + repeatedStringForDevices + `,`, `CpuIds:` + fmt.Sprintf("%v", this.CpuIds) + `,`, `Memory:` + repeatedStringForMemory + `,`, + `Resources:` + repeatedStringForResources + `,`, `}`, }, "") return s @@ -1456,10 +2203,34 @@ func (this *PodResources) String() string { repeatedStringForContainers += strings.Replace(f.String(), "ContainerResources", "ContainerResources", 1) + "," } repeatedStringForContainers += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" s := strings.Join([]string{`&PodResources{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, 
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `PodRole:` + fmt.Sprintf("%v", this.PodRole) + `,`, + `PodType:` + fmt.Sprintf("%v", this.PodType) + `,`, `Containers:` + repeatedStringForContainers + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, `}`, }, "") return s @@ -1478,11 +2249,17 @@ func (this *ContainerResources) String() string { repeatedStringForMemory += strings.Replace(f.String(), "ContainerMemory", "ContainerMemory", 1) + "," } repeatedStringForMemory += "}" + repeatedStringForResources := "[]*TopologyAwareResource{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(f.String(), "TopologyAwareResource", "TopologyAwareResource", 1) + "," + } + repeatedStringForResources += "}" s := strings.Join([]string{`&ContainerResources{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Devices:` + repeatedStringForDevices + `,`, `CpuIds:` + fmt.Sprintf("%v", this.CpuIds) + `,`, `Memory:` + repeatedStringForMemory + `,`, + `Resources:` + repeatedStringForResources + `,`, `}`, }, "") return s @@ -1511,6 +2288,69 @@ func (this *ContainerDevices) String() string { }, "") return s } +func (this *TopologyAwareResource) String() string { + if this == nil { + return "nil" + } + repeatedStringForTopologyAwareQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareQuantityList { + repeatedStringForTopologyAwareQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareQuantityList += "}" + repeatedStringForOriginalTopologyAwareQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.OriginalTopologyAwareQuantityList { + repeatedStringForOriginalTopologyAwareQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForOriginalTopologyAwareQuantityList += "}" + s := 
strings.Join([]string{`&TopologyAwareResource{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `IsNodeResource:` + fmt.Sprintf("%v", this.IsNodeResource) + `,`, + `IsScalarResource:` + fmt.Sprintf("%v", this.IsScalarResource) + `,`, + `AggregatedQuantity:` + fmt.Sprintf("%v", this.AggregatedQuantity) + `,`, + `OriginalAggregatedQuantity:` + fmt.Sprintf("%v", this.OriginalAggregatedQuantity) + `,`, + `TopologyAwareQuantityList:` + repeatedStringForTopologyAwareQuantityList + `,`, + `OriginalTopologyAwareQuantityList:` + repeatedStringForOriginalTopologyAwareQuantityList + `,`, + `}`, + }, "") + return s +} +func (this *AllocatableTopologyAwareResource) String() string { + if this == nil { + return "nil" + } + repeatedStringForTopologyAwareAllocatableQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareAllocatableQuantityList { + repeatedStringForTopologyAwareAllocatableQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareAllocatableQuantityList += "}" + repeatedStringForTopologyAwareCapacityQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareCapacityQuantityList { + repeatedStringForTopologyAwareCapacityQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareCapacityQuantityList += "}" + s := strings.Join([]string{`&AllocatableTopologyAwareResource{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `IsNodeResource:` + fmt.Sprintf("%v", this.IsNodeResource) + `,`, + `IsScalarResource:` + fmt.Sprintf("%v", this.IsScalarResource) + `,`, + `AggregatedAllocatableQuantity:` + fmt.Sprintf("%v", this.AggregatedAllocatableQuantity) + `,`, + `TopologyAwareAllocatableQuantityList:` + repeatedStringForTopologyAwareAllocatableQuantityList + `,`, + `AggregatedCapacityQuantity:` + fmt.Sprintf("%v", 
this.AggregatedCapacityQuantity) + `,`, + `TopologyAwareCapacityQuantityList:` + repeatedStringForTopologyAwareCapacityQuantityList + `,`, + `}`, + }, "") + return s +} +func (this *TopologyAwareQuantity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologyAwareQuantity{`, + `ResourceValue:` + fmt.Sprintf("%v", this.ResourceValue) + `,`, + `Node:` + fmt.Sprintf("%v", this.Node) + `,`, + `}`, + }, "") + return s +} func (this *TopologyInfo) String() string { if this == nil { return "nil" @@ -1767,19 +2607,53 @@ func (m *AllocatableResourcesResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthApi } - iNdEx += skippy + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &AllocatableTopologyAwareResource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } @@ -2016,6 +2890,70 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { 
m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodRole", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodRole = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) } @@ -2049,6 +2987,260 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = 
postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -2275,14 +3467,48 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &TopologyAwareResource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -2583,6 +3809,510 @@ func (m *ContainerDevices) Unmarshal(dAtA []byte) error { } return nil } +func (m *TopologyAwareResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologyAwareResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologyAwareResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType 
= %d for field IsNodeResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNodeResource = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsScalarResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsScalarResource = bool(v != 0) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AggregatedQuantity = float64(math.Float64frombits(v)) + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalAggregatedQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.OriginalAggregatedQuantity = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyAwareQuantityList = 
append(m.TopologyAwareQuantityList, &TopologyAwareQuantity{}) + if err := m.TopologyAwareQuantityList[len(m.TopologyAwareQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalTopologyAwareQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalTopologyAwareQuantityList = append(m.OriginalTopologyAwareQuantityList, &TopologyAwareQuantity{}) + if err := m.OriginalTopologyAwareQuantityList[len(m.OriginalTopologyAwareQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocatableTopologyAwareResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatableTopologyAwareResource: wiretype end group for non-group") + } 
+ if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatableTopologyAwareResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNodeResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNodeResource = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsScalarResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsScalarResource = bool(v != 0) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedAllocatableQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AggregatedAllocatableQuantity = float64(math.Float64frombits(v)) + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareAllocatableQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyAwareAllocatableQuantityList = append(m.TopologyAwareAllocatableQuantityList, &TopologyAwareQuantity{}) + if err := m.TopologyAwareAllocatableQuantityList[len(m.TopologyAwareAllocatableQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedCapacityQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AggregatedCapacityQuantity = float64(math.Float64frombits(v)) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareCapacityQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyAwareCapacityQuantityList = append(m.TopologyAwareCapacityQuantityList, &TopologyAwareQuantity{}) + if err := 
m.TopologyAwareCapacityQuantityList[len(m.TopologyAwareCapacityQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologyAwareQuantity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologyAwareQuantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologyAwareQuantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ResourceValue = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + m.Node = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Node |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 
0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TopologyInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto index add2aad487498..d3610d2a85399 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto +++ b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1/api.proto @@ -28,6 +28,7 @@ message AllocatableResourcesResponse { repeated ContainerDevices devices = 1; repeated int64 cpu_ids = 2; repeated ContainerMemory memory = 3; + repeated AllocatableTopologyAwareResource resources = 4; } // ListPodResourcesRequest is the request made to the PodResourcesLister service @@ -42,7 +43,11 @@ message ListPodResourcesResponse { message PodResources { string name = 1; string namespace = 2; - repeated ContainerResources containers = 3; + string pod_role = 3; + string pod_type = 4; + repeated ContainerResources containers = 5; + map labels = 6; + map annotations = 7; } // ContainerResources contains information about the resources assigned to a container @@ -51,6 +56,7 @@ message ContainerResources { repeated ContainerDevices devices = 2; repeated int64 cpu_ids = 3; repeated ContainerMemory memory = 4; + repeated TopologyAwareResource resources = 5; } // ContainerMemory contains information about memory and hugepages assigned to a container @@ -67,6 +73,34 @@ message ContainerDevices { TopologyInfo topology = 3; } + +// TopologyAwareResource contains information about the allocated resource information in topology aware format +message TopologyAwareResource { + string resource_name = 1; + bool is_node_resource = 2; + bool is_scalar_resource = 3; + double aggregated_quantity = 4; + double original_aggregated_quantity = 5; + repeated 
TopologyAwareQuantity topology_aware_quantity_list = 6; + repeated TopologyAwareQuantity original_topology_aware_quantity_list = 7; +} + +// AllocatableTopologyAwareResource contains information about the allocatable resource information in topology aware format +message AllocatableTopologyAwareResource { + string resource_name = 1; + bool is_node_resource = 2; + bool is_scalar_resource = 3; + double aggregated_allocatable_quantity = 4; + repeated TopologyAwareQuantity topology_aware_allocatable_quantity_list = 5; + double aggregated_capacity_quantity = 6; + repeated TopologyAwareQuantity topology_aware_capacity_quantity_list = 7; +} + +message TopologyAwareQuantity { + double resource_value = 1; + uint64 node = 2; +} + // Topology describes hardware topology of the resource message TopologyInfo { repeated NUMANode nodes = 1; diff --git a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go index e4ebb48397782..e3e485a938174 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.pb.go @@ -24,6 +24,7 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -299,40 +300,330 @@ func (m *ContainerDevices) GetDeviceIds() []string { return nil } +// PodTopologyAwareResources contains information about the resources assigned to a container, +// and organized as topology aware format. 
+type PodTopologyAwareResources struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + PodName string `protobuf:"bytes,2,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + PodNamespace string `protobuf:"bytes,3,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + ContainerTopologyAwareResources []*ContainerTopologyAwareResources `protobuf:"bytes,4,rep,name=container_topology_aware_resources,json=containerTopologyAwareResources,proto3" json:"container_topology_aware_resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodTopologyAwareResources) Reset() { *m = PodTopologyAwareResources{} } +func (*PodTopologyAwareResources) ProtoMessage() {} +func (*PodTopologyAwareResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{5} +} +func (m *PodTopologyAwareResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTopologyAwareResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodTopologyAwareResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodTopologyAwareResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTopologyAwareResources.Merge(m, src) +} +func (m *PodTopologyAwareResources) XXX_Size() int { + return m.Size() +} +func (m *PodTopologyAwareResources) XXX_DiscardUnknown() { + xxx_messageInfo_PodTopologyAwareResources.DiscardUnknown(m) +} + +var xxx_messageInfo_PodTopologyAwareResources proto.InternalMessageInfo + +func (m *PodTopologyAwareResources) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *PodTopologyAwareResources) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m 
*PodTopologyAwareResources) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +func (m *PodTopologyAwareResources) GetContainerTopologyAwareResources() []*ContainerTopologyAwareResources { + if m != nil { + return m.ContainerTopologyAwareResources + } + return nil +} + +type ContainerTopologyAwareResources struct { + ContainerName string `protobuf:"bytes,1,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + AllocatedResources *TopologyAwareResources `protobuf:"bytes,2,opt,name=allocated_resources,json=allocatedResources,proto3" json:"allocated_resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerTopologyAwareResources) Reset() { *m = ContainerTopologyAwareResources{} } +func (*ContainerTopologyAwareResources) ProtoMessage() {} +func (*ContainerTopologyAwareResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{6} +} +func (m *ContainerTopologyAwareResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerTopologyAwareResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerTopologyAwareResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerTopologyAwareResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerTopologyAwareResources.Merge(m, src) +} +func (m *ContainerTopologyAwareResources) XXX_Size() int { + return m.Size() +} +func (m *ContainerTopologyAwareResources) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerTopologyAwareResources.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerTopologyAwareResources proto.InternalMessageInfo + +func (m *ContainerTopologyAwareResources) GetContainerName() string { + if m != nil { + return 
m.ContainerName + } + return "" +} + +func (m *ContainerTopologyAwareResources) GetAllocatedResources() *TopologyAwareResources { + if m != nil { + return m.AllocatedResources + } + return nil +} + +type TopologyAwareResources struct { + TopologyAwareResources map[string]*ListOfTopologyAwareQuantity `protobuf:"bytes,1,rep,name=topology_aware_resources,json=topologyAwareResources,proto3" json:"topology_aware_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyAwareResources) Reset() { *m = TopologyAwareResources{} } +func (*TopologyAwareResources) ProtoMessage() {} +func (*TopologyAwareResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{7} +} +func (m *TopologyAwareResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyAwareResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyAwareResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyAwareResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyAwareResources.Merge(m, src) +} +func (m *TopologyAwareResources) XXX_Size() int { + return m.Size() +} +func (m *TopologyAwareResources) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyAwareResources.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyAwareResources proto.InternalMessageInfo + +func (m *TopologyAwareResources) GetTopologyAwareResources() map[string]*ListOfTopologyAwareQuantity { + if m != nil { + return m.TopologyAwareResources + } + return nil +} + +type ListOfTopologyAwareQuantity struct { + TopologyAwareQuantityList []*TopologyAwareQuantity 
`protobuf:"bytes,1,rep,name=topology_aware_quantity_list,json=topologyAwareQuantityList,proto3" json:"topology_aware_quantity_list,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListOfTopologyAwareQuantity) Reset() { *m = ListOfTopologyAwareQuantity{} } +func (*ListOfTopologyAwareQuantity) ProtoMessage() {} +func (*ListOfTopologyAwareQuantity) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{8} +} +func (m *ListOfTopologyAwareQuantity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListOfTopologyAwareQuantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListOfTopologyAwareQuantity.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListOfTopologyAwareQuantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOfTopologyAwareQuantity.Merge(m, src) +} +func (m *ListOfTopologyAwareQuantity) XXX_Size() int { + return m.Size() +} +func (m *ListOfTopologyAwareQuantity) XXX_DiscardUnknown() { + xxx_messageInfo_ListOfTopologyAwareQuantity.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOfTopologyAwareQuantity proto.InternalMessageInfo + +func (m *ListOfTopologyAwareQuantity) GetTopologyAwareQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareQuantityList + } + return nil +} + +type TopologyAwareQuantity struct { + ResourceValue string `protobuf:"bytes,1,opt,name=resource_value,json=resourceValue,proto3" json:"resource_value,omitempty"` + Nodes uint64 `protobuf:"varint,2,opt,name=nodes,proto3" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyAwareQuantity) Reset() { *m = TopologyAwareQuantity{} } +func (*TopologyAwareQuantity) ProtoMessage() {} +func (*TopologyAwareQuantity) 
Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{9} +} +func (m *TopologyAwareQuantity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyAwareQuantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyAwareQuantity.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyAwareQuantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyAwareQuantity.Merge(m, src) +} +func (m *TopologyAwareQuantity) XXX_Size() int { + return m.Size() +} +func (m *TopologyAwareQuantity) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyAwareQuantity.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyAwareQuantity proto.InternalMessageInfo + +func (m *TopologyAwareQuantity) GetResourceValue() string { + if m != nil { + return m.ResourceValue + } + return "" +} + +func (m *TopologyAwareQuantity) GetNodes() uint64 { + if m != nil { + return m.Nodes + } + return 0 +} + func init() { proto.RegisterType((*ListPodResourcesRequest)(nil), "v1alpha1.ListPodResourcesRequest") proto.RegisterType((*ListPodResourcesResponse)(nil), "v1alpha1.ListPodResourcesResponse") proto.RegisterType((*PodResources)(nil), "v1alpha1.PodResources") proto.RegisterType((*ContainerResources)(nil), "v1alpha1.ContainerResources") proto.RegisterType((*ContainerDevices)(nil), "v1alpha1.ContainerDevices") + proto.RegisterType((*PodTopologyAwareResources)(nil), "v1alpha1.PodTopologyAwareResources") + proto.RegisterType((*ContainerTopologyAwareResources)(nil), "v1alpha1.ContainerTopologyAwareResources") + proto.RegisterType((*TopologyAwareResources)(nil), "v1alpha1.TopologyAwareResources") + proto.RegisterMapType((map[string]*ListOfTopologyAwareQuantity)(nil), "v1alpha1.TopologyAwareResources.TopologyAwareResourcesEntry") + 
proto.RegisterType((*ListOfTopologyAwareQuantity)(nil), "v1alpha1.ListOfTopologyAwareQuantity") + proto.RegisterType((*TopologyAwareQuantity)(nil), "v1alpha1.TopologyAwareQuantity") } func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 343 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0xad, 0xdb, 0x0a, 0xc8, 0xd1, 0x4a, 0xc8, 0x03, 0x84, 0xaa, 0x58, 0xc5, 0x2c, 0x5d, 0x48, - 0xd5, 0xc2, 0x06, 0x13, 0xb0, 0x20, 0x21, 0x40, 0x19, 0x60, 0xa3, 0x4a, 0x13, 0xd3, 0x46, 0xa2, - 0xb1, 0x89, 0x93, 0x8e, 0x88, 0x4f, 0xe0, 0xb3, 0x3a, 0x32, 0x32, 0xd2, 0xf0, 0x23, 0x28, 0xb6, - 0xac, 0x04, 0x5a, 0x98, 0x7c, 0x77, 0xef, 0x9d, 0xdf, 0xf3, 0x9d, 0xc1, 0xf2, 0x44, 0xe8, 0x88, - 0x98, 0x27, 0x1c, 0x6f, 0xcc, 0xfa, 0xde, 0x93, 0x98, 0x78, 0xfd, 0xd6, 0xe1, 0x38, 0x4c, 0x26, - 0xe9, 0xc8, 0xf1, 0xf9, 0xb4, 0x37, 0xe6, 0x63, 0xde, 0x53, 0x84, 0x51, 0xfa, 0xa8, 0x32, 0x95, - 0xa8, 0x48, 0x37, 0xd2, 0x5d, 0xd8, 0xb9, 0x0a, 0x65, 0x72, 0xcb, 0x03, 0x97, 0x49, 0x9e, 0xc6, - 0x3e, 0x93, 0x2e, 0x7b, 0x4e, 0x99, 0x4c, 0xe8, 0x3d, 0xd8, 0xcb, 0x90, 0x14, 0x3c, 0x92, 0x0c, - 0x9f, 0x40, 0x53, 0xf0, 0x60, 0x18, 0x1b, 0xc0, 0x46, 0x9d, 0x5a, 0x77, 0x73, 0xb0, 0xed, 0x18, - 0x1f, 0xce, 0x8f, 0xb6, 0x86, 0x28, 0x65, 0xf4, 0x05, 0x1a, 0x65, 0x14, 0x63, 0xa8, 0x47, 0xde, - 0x94, 0xd9, 0xa8, 0x83, 0xba, 0x96, 0xab, 0x62, 0xdc, 0x06, 0x2b, 0x3f, 0xa5, 0xf0, 0x7c, 0x66, - 0x57, 0x15, 0x50, 0x14, 0xf0, 0x29, 0x80, 0xcf, 0xa3, 0xc4, 0x0b, 0x23, 0x16, 0x4b, 0xbb, 0xa6, - 0xb4, 0xdb, 0x85, 0xf6, 0xb9, 0xc1, 0x0a, 0x07, 0x25, 0x3e, 0x7d, 0x00, 0xbc, 0xcc, 0x58, 0xe9, - 0xe2, 0x18, 0xd6, 0x03, 0x36, 0x0b, 0xf3, 0x07, 0x56, 0x95, 0x48, 0x6b, 0x85, 0xc8, 0x85, 0x66, - 0xb8, 0x86, 0x4a, 0xef, 0x60, 0xeb, 0x37, 0x88, 0x0f, 0xa0, 0x69, 0x86, 0x35, 0x2c, 0xc9, 0x34, - 0x4c, 0xf1, 0x3a, 0x97, 0xdb, 0x03, 0xd0, 0x77, 0x0c, 0xc3, 0x40, 
0x2b, 0x5a, 0xae, 0xa5, 0x2b, - 0x97, 0x81, 0x1c, 0x30, 0xc0, 0xe5, 0xb9, 0xe5, 0xcb, 0x61, 0x31, 0xbe, 0x81, 0x7a, 0x1e, 0xe1, - 0xfd, 0xc2, 0xda, 0x1f, 0x1b, 0x6d, 0xd1, 0xff, 0x28, 0x7a, 0xb3, 0xb4, 0x72, 0xd6, 0x9e, 0x2f, - 0x08, 0xfa, 0x58, 0x90, 0xca, 0x6b, 0x46, 0xd0, 0x3c, 0x23, 0xe8, 0x3d, 0x23, 0xe8, 0x33, 0x23, - 0xe8, 0xed, 0x8b, 0x54, 0x46, 0x6b, 0xea, 0xdf, 0x1c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0xc0, - 0xce, 0xf2, 0x80, 0x7d, 0x02, 0x00, 0x00, + // 620 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xcb, 0x72, 0xd3, 0x3c, + 0x18, 0xad, 0x93, 0xf4, 0x92, 0xaf, 0x97, 0xe9, 0xe8, 0xff, 0x69, 0x9d, 0x34, 0xb8, 0x41, 0x9d, + 0xcc, 0x94, 0x05, 0xe9, 0x34, 0xb0, 0x60, 0x68, 0x37, 0xdc, 0x16, 0xcc, 0x30, 0xb4, 0xf5, 0x94, + 0xb2, 0xc3, 0x28, 0xb6, 0x9a, 0x7a, 0x70, 0x2d, 0xd7, 0x96, 0xc3, 0x64, 0x03, 0x0c, 0x4f, 0xc0, + 0x9e, 0x17, 0xea, 0x92, 0x25, 0x4b, 0x1a, 0xb6, 0x3c, 0x04, 0x23, 0xc9, 0x8e, 0x9d, 0xd6, 0x49, + 0x57, 0x96, 0xbe, 0x73, 0x3e, 0x9d, 0xa3, 0x63, 0x49, 0x50, 0x25, 0x81, 0xdb, 0x0e, 0x42, 0xc6, + 0x19, 0x5a, 0xe8, 0xef, 0x12, 0x2f, 0x38, 0x23, 0xbb, 0xf5, 0x07, 0x3d, 0x97, 0x9f, 0xc5, 0xdd, + 0xb6, 0xcd, 0xce, 0x77, 0x7a, 0xac, 0xc7, 0x76, 0x24, 0xa1, 0x1b, 0x9f, 0xca, 0x99, 0x9c, 0xc8, + 0x91, 0x6a, 0xc4, 0x35, 0x58, 0x7f, 0xed, 0x46, 0xfc, 0x90, 0x39, 0x26, 0x8d, 0x58, 0x1c, 0xda, + 0x34, 0x32, 0xe9, 0x45, 0x4c, 0x23, 0x8e, 0xdf, 0x81, 0x7e, 0x13, 0x8a, 0x02, 0xe6, 0x47, 0x14, + 0xed, 0xc1, 0x72, 0xc0, 0x1c, 0x2b, 0x4c, 0x01, 0x5d, 0x6b, 0x96, 0xb7, 0x17, 0x3b, 0x6b, 0xed, + 0xd4, 0x47, 0x7b, 0xac, 0x6d, 0x29, 0xc8, 0xcd, 0xf0, 0x67, 0x58, 0xca, 0xa3, 0x08, 0x41, 0xc5, + 0x27, 0xe7, 0x54, 0xd7, 0x9a, 0xda, 0x76, 0xd5, 0x94, 0x63, 0xd4, 0x80, 0xaa, 0xf8, 0x46, 0x01, + 0xb1, 0xa9, 0x5e, 0x92, 0x40, 0x56, 0x40, 0xfb, 0x00, 0x36, 0xf3, 0x39, 0x71, 0x7d, 0x1a, 0x46, + 0x7a, 0x59, 0x6a, 0x37, 0x32, 0xed, 0xe7, 0x29, 0x96, 0x39, 0xc8, 0xf1, 0xf1, 0x7b, 0x40, 0x37, + 0x19, 0x85, 
0x2e, 0x1e, 0xc1, 0xbc, 0x43, 0xfb, 0xae, 0xd8, 0x60, 0x49, 0x8a, 0xd4, 0x0b, 0x44, + 0x5e, 0x28, 0x86, 0x99, 0x52, 0xf1, 0x09, 0xac, 0x5e, 0x07, 0xd1, 0x16, 0x2c, 0xa7, 0x61, 0x59, + 0x39, 0x99, 0xa5, 0xb4, 0xf8, 0x46, 0xc8, 0xdd, 0x05, 0x50, 0x6b, 0x58, 0xae, 0xa3, 0x14, 0xab, + 0x66, 0x55, 0x55, 0x5e, 0x39, 0x11, 0xfe, 0xab, 0x41, 0xed, 0x90, 0x39, 0xc7, 0x2c, 0x60, 0x1e, + 0xeb, 0x0d, 0x9e, 0x7e, 0x22, 0x21, 0xcd, 0xfc, 0xaf, 0xc3, 0xbc, 0xf8, 0x25, 0xb1, 0xeb, 0x24, + 0x6b, 0xcf, 0x05, 0xcc, 0x79, 0xeb, 0x3a, 0xa8, 0x06, 0x0b, 0x02, 0x90, 0xaa, 0x2a, 0x49, 0x41, + 0x94, 0x82, 0x5b, 0xea, 0x37, 0x66, 0x49, 0x97, 0x95, 0xab, 0x04, 0x57, 0x61, 0xf7, 0x01, 0x8f, + 0xc2, 0xb3, 0x78, 0x22, 0x6e, 0x11, 0xa1, 0x9e, 0x3b, 0x00, 0x15, 0x99, 0xcf, 0xfd, 0x82, 0x7c, + 0x8a, 0xfd, 0x9a, 0x9b, 0xf6, 0x74, 0x02, 0xfe, 0xa1, 0xc1, 0xe6, 0x2d, 0x8b, 0xa0, 0x16, 0xac, + 0x64, 0xde, 0x72, 0xb9, 0x2e, 0x8f, 0xaa, 0x72, 0x9f, 0x47, 0xf0, 0x1f, 0xf1, 0x3c, 0x66, 0x13, + 0x4e, 0xf3, 0x87, 0x56, 0xa4, 0xb1, 0xd8, 0x69, 0x66, 0x9e, 0x27, 0x58, 0x45, 0xa3, 0xe6, 0xcc, + 0xdd, 0xb7, 0x12, 0xac, 0x4d, 0x30, 0xd5, 0x07, 0x7d, 0x62, 0x4c, 0xea, 0x9e, 0xec, 0xdf, 0x26, + 0x39, 0xa1, 0xfc, 0xd2, 0xe7, 0xe1, 0xc0, 0x5c, 0xe3, 0x85, 0x60, 0x3d, 0x80, 0x8d, 0x29, 0x6d, + 0x68, 0x15, 0xca, 0x1f, 0xe9, 0x20, 0x09, 0x48, 0x0c, 0xd1, 0x1e, 0xcc, 0xf6, 0x89, 0x17, 0xd3, + 0x24, 0x88, 0x56, 0xe6, 0x4a, 0x5c, 0xfc, 0x83, 0xd3, 0xb1, 0xd5, 0x8e, 0x62, 0xe2, 0x73, 0x97, + 0x0f, 0x4c, 0xd5, 0xf3, 0xa4, 0xf4, 0x58, 0xc3, 0x5f, 0x60, 0x63, 0x0a, 0x13, 0x7d, 0x80, 0xc6, + 0xb5, 0x20, 0x2e, 0x12, 0xc8, 0xf2, 0xdc, 0x88, 0x27, 0x61, 0x6c, 0x4e, 0x08, 0x63, 0x24, 0x58, + 0xe3, 0x45, 0x65, 0x21, 0x8c, 0x8f, 0xe1, 0x4e, 0xb1, 0x74, 0x0b, 0x56, 0x46, 0xf7, 0x4d, 0xed, + 0x31, 0x39, 0x18, 0x69, 0xf5, 0x44, 0x14, 0xd1, 0xff, 0x30, 0xeb, 0x33, 0x27, 0x39, 0x0a, 0x15, + 0x53, 0x4d, 0x3a, 0x14, 0x50, 0xfe, 0x81, 0x12, 0x4a, 0x34, 0x44, 0x07, 0x50, 0x11, 0x23, 0x74, + 0x6f, 0x3c, 0xa6, 0x82, 0xa7, 0xb3, 0x8e, 0xa7, 
0x51, 0xd4, 0x13, 0x8a, 0x67, 0x9e, 0x35, 0x2e, + 0xaf, 0x0c, 0xed, 0xd7, 0x95, 0x31, 0xf3, 0x75, 0x68, 0x68, 0x97, 0x43, 0x43, 0xfb, 0x39, 0x34, + 0xb4, 0xdf, 0x43, 0x43, 0xfb, 0xfe, 0xc7, 0x98, 0xe9, 0xce, 0xc9, 0x07, 0xfa, 0xe1, 0xbf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x19, 0xe1, 0x50, 0x0a, 0xe6, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -609,78 +900,299 @@ func (m *ContainerDevices) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PodTopologyAwareResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *ListPodResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n + +func (m *PodTopologyAwareResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ListPodResourcesResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *PodTopologyAwareResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.PodResources) > 0 { - for _, e := range m.PodResources { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) + if len(m.ContainerTopologyAwareResources) > 0 { + for iNdEx := len(m.ContainerTopologyAwareResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerTopologyAwareResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } } - return n -} - -func (m *PodResources) Size() (n int) { - if m == nil { - return 0 - } - var l 
int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x1a } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x12 } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) - } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ContainerResources) Size() (n int) { - if m == nil { - return 0 +func (m *ContainerTopologyAwareResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ContainerTopologyAwareResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerTopologyAwareResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - if len(m.Devices) > 0 { - for _, e := range m.Devices { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) + if m.AllocatedResources != nil { + { + size, err := m.AllocatedResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil 
+} + +func (m *TopologyAwareResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyAwareResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyAwareResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TopologyAwareResources) > 0 { + for k := range m.TopologyAwareResources { + v := m.TopologyAwareResources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListOfTopologyAwareQuantity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOfTopologyAwareQuantity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOfTopologyAwareQuantity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TopologyAwareQuantityList) > 0 { + for iNdEx := len(m.TopologyAwareQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TopologyAwareQuantity) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyAwareQuantity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyAwareQuantity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nodes != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Nodes)) + i-- + dAtA[i] = 0x10 + } + if len(m.ResourceValue) > 0 { + i -= len(m.ResourceValue) + copy(dAtA[i:], m.ResourceValue) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ListPodResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ListPodResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodResources) > 0 { + for _, e := range m.PodResources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *PodResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ContainerResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) } } return n @@ 
-705,6 +1217,103 @@ func (m *ContainerDevices) Size() (n int) { return n } +func (m *PodTopologyAwareResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.ContainerTopologyAwareResources) > 0 { + for _, e := range m.ContainerTopologyAwareResources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ContainerTopologyAwareResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.AllocatedResources != nil { + l = m.AllocatedResources.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *TopologyAwareResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TopologyAwareResources) > 0 { + for k, v := range m.TopologyAwareResources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListOfTopologyAwareQuantity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TopologyAwareQuantityList) > 0 { + for _, e := range m.TopologyAwareQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *TopologyAwareQuantity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceValue) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Nodes != 0 { + n += 1 + sovApi(uint64(m.Nodes)) + } + return n +} + func sovApi(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -779,43 +1388,592 @@ func (this *ContainerDevices) String() string { }, "") return s } -func 
valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { +func (this *PodTopologyAwareResources) String() string { + if this == nil { return "nil" } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + repeatedStringForContainerTopologyAwareResources := "[]*ContainerTopologyAwareResources{" + for _, f := range this.ContainerTopologyAwareResources { + repeatedStringForContainerTopologyAwareResources += strings.Replace(f.String(), "ContainerTopologyAwareResources", "ContainerTopologyAwareResources", 1) + "," + } + repeatedStringForContainerTopologyAwareResources += "}" + s := strings.Join([]string{`&PodTopologyAwareResources{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `ContainerTopologyAwareResources:` + repeatedStringForContainerTopologyAwareResources + `,`, + `}`, + }, "") + return s } -func (m *ListPodResourcesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi +func (this *ContainerTopologyAwareResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerTopologyAwareResources{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `AllocatedResources:` + strings.Replace(this.AllocatedResources.String(), "TopologyAwareResources", "TopologyAwareResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TopologyAwareResources) String() string { + if this == nil { + return "nil" + } + keysForTopologyAwareResources := make([]string, 0, len(this.TopologyAwareResources)) + for k := range this.TopologyAwareResources { + keysForTopologyAwareResources = append(keysForTopologyAwareResources, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopologyAwareResources) 
+ mapStringForTopologyAwareResources := "map[string]*ListOfTopologyAwareQuantity{" + for _, k := range keysForTopologyAwareResources { + mapStringForTopologyAwareResources += fmt.Sprintf("%v: %v,", k, this.TopologyAwareResources[k]) + } + mapStringForTopologyAwareResources += "}" + s := strings.Join([]string{`&TopologyAwareResources{`, + `TopologyAwareResources:` + mapStringForTopologyAwareResources + `,`, + `}`, + }, "") + return s +} +func (this *ListOfTopologyAwareQuantity) String() string { + if this == nil { + return "nil" + } + repeatedStringForTopologyAwareQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareQuantityList { + repeatedStringForTopologyAwareQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareQuantityList += "}" + s := strings.Join([]string{`&ListOfTopologyAwareQuantity{`, + `TopologyAwareQuantityList:` + repeatedStringForTopologyAwareQuantityList + `,`, + `}`, + }, "") + return s +} +func (this *TopologyAwareQuantity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologyAwareQuantity{`, + `ResourceValue:` + fmt.Sprintf("%v", this.ResourceValue) + `,`, + `Nodes:` + fmt.Sprintf("%v", this.Nodes) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ListPodResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ListPodResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodResources = append(m.PodResources, &PodResources{}) + if err := 
m.PodResources[len(m.PodResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, &ContainerResources{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &ContainerDevices{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerDevices) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerDevices: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerDevices: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi } - if iNdEx >= l { - return io.ErrUnexpectedEOF + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListPodResourcesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListPodResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.DeviceIds = append(m.DeviceIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -840,7 +1998,7 @@ func (m *ListPodResourcesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { +func (m *PodTopologyAwareResources) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -863,15 +2021,111 @@ func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListPodResourcesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PodTopologyAwareResources: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListPodResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodTopologyAwareResources: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodResources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTopologyAwareResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -898,8 +2152,8 @@ func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PodResources = 
append(m.PodResources, &PodResources{}) - if err := m.PodResources[len(m.PodResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ContainerTopologyAwareResources = append(m.ContainerTopologyAwareResources, &ContainerTopologyAwareResources{}) + if err := m.ContainerTopologyAwareResources[len(m.ContainerTopologyAwareResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -927,7 +2181,7 @@ func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodResources) Unmarshal(dAtA []byte) error { +func (m *ContainerTopologyAwareResources) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -950,15 +2204,15 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodResources: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerTopologyAwareResources: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerTopologyAwareResources: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -986,43 +2240,11 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1049,8 +2271,10 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Containers = append(m.Containers, &ContainerResources{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.AllocatedResources == nil { + m.AllocatedResources = &TopologyAwareResources{} + } + if err := m.AllocatedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1078,7 +2302,7 @@ func (m *PodResources) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerResources) Unmarshal(dAtA []byte) error { +func (m *TopologyAwareResources) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1101,17 +2325,17 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerResources: wiretype end group for non-group") + return fmt.Errorf("proto: TopologyAwareResources: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TopologyAwareResources: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareResources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -1121,27 +2345,177 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthApi } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthApi } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.TopologyAwareResources == nil { + m.TopologyAwareResources = make(map[string]*ListOfTopologyAwareQuantity) + } + var mapkey string + var mapvalue *ListOfTopologyAwareQuantity + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ListOfTopologyAwareQuantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TopologyAwareResources[mapkey] = mapvalue iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOfTopologyAwareQuantity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOfTopologyAwareQuantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ListOfTopologyAwareQuantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareQuantityList", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1168,8 +2542,8 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Devices = append(m.Devices, &ContainerDevices{}) - if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TopologyAwareQuantityList = append(m.TopologyAwareQuantityList, &TopologyAwareQuantity{}) + if err := m.TopologyAwareQuantityList[len(m.TopologyAwareQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1197,7 +2571,7 @@ func (m *ContainerResources) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerDevices) Unmarshal(dAtA []byte) error { +func (m *TopologyAwareQuantity) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1220,15 +2594,15 @@ func (m *ContainerDevices) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerDevices: wiretype end group for non-group") + return fmt.Errorf("proto: TopologyAwareQuantity: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerDevices: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TopologyAwareQuantity: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceValue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1256,13 +2630,13 @@ func (m 
*ContainerDevices) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceName = string(dAtA[iNdEx:postIndex]) + m.ResourceValue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceIds", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } - var stringLen uint64 + m.Nodes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -1272,24 +2646,11 @@ func (m *ContainerDevices) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Nodes |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeviceIds = append(m.DeviceIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto index 782ac714b6a4e..c7f46a665e453 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto +++ b/staging/src/k8s.io/kubelet/pkg/apis/podresources/v1alpha1/api.proto @@ -45,4 +45,31 @@ message ContainerResources { message ContainerDevices { string resource_name = 1; repeated string device_ids = 2; -} \ No newline at end of file +} + +// PodTopologyAwareResources contains information about the resources assigned to a container, +// and organized as topology aware format. 
+message PodTopologyAwareResources { + string pod_uid = 1; + string pod_name = 2; + string pod_namespace = 3; + repeated ContainerTopologyAwareResources container_topology_aware_resources = 4; +} + +message ContainerTopologyAwareResources { + string container_name = 1; + TopologyAwareResources allocated_resources = 2; +} + +message TopologyAwareResources { + map topology_aware_resources = 1; +} + +message ListOfTopologyAwareQuantity { + repeated TopologyAwareQuantity topology_aware_quantity_list = 1; +} + +message TopologyAwareQuantity { + string resource_value = 1; + uint64 nodes = 2; +} diff --git a/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/OWNERS b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/OWNERS new file mode 100644 index 0000000000000..5f84c16b336fb --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/OWNERS @@ -0,0 +1,3 @@ +reviewers: +- sunjianyu +- shaowei diff --git a/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/BUILD b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/BUILD new file mode 100644 index 0000000000000..b0cec4470bedf --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "api.pb.go", + "constants.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1", + importpath = "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1", + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + 
+filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/api.pb.go new file mode 100644 index 0000000000000..3a0a0a7a0a766 --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/api.pb.go @@ -0,0 +1,10268 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: api.proto + +package v1alpha1 + +import ( + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ContainerType int32 + +const ( + ContainerType_INIT ContainerType = 0 + ContainerType_MAIN ContainerType = 1 + ContainerType_SIDECAR ContainerType = 2 + ContainerType_EPHEMERAL ContainerType = 3 +) + +var ContainerType_name = map[int32]string{ + 0: "INIT", + 1: "MAIN", + 2: "SIDECAR", + 3: "EPHEMERAL", +} + +var ContainerType_value = map[string]int32{ + "INIT": 0, + "MAIN": 1, + "SIDECAR": 2, + "EPHEMERAL": 3, +} + +func (x ContainerType) String() string { + return proto.EnumName(ContainerType_name, int32(x)) +} + +func (ContainerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{0} +} + +type ResourcePluginOptions struct { + // Indicates if PreStartContainer call is required before each container start + PreStartRequired bool `protobuf:"varint,1,opt,name=pre_start_required,json=preStartRequired,proto3" json:"pre_start_required,omitempty"` + // Indicates if the resource this plugin managed needs topology alignment + WithTopologyAlignment bool `protobuf:"varint,2,opt,name=with_topology_alignment,json=withTopologyAlignment,proto3" json:"with_topology_alignment,omitempty"` + // Indicates if the resource needs reconciling allocation result + NeedReconcile bool `protobuf:"varint,3,opt,name=need_reconcile,json=needReconcile,proto3" json:"need_reconcile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourcePluginOptions) Reset() { *m = ResourcePluginOptions{} } +func (*ResourcePluginOptions) ProtoMessage() {} +func (*ResourcePluginOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{0} +} +func (m *ResourcePluginOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePluginOptions) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourcePluginOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourcePluginOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePluginOptions.Merge(m, src) +} +func (m *ResourcePluginOptions) XXX_Size() int { + return m.Size() +} +func (m *ResourcePluginOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePluginOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePluginOptions proto.InternalMessageInfo + +func (m *ResourcePluginOptions) GetPreStartRequired() bool { + if m != nil { + return m.PreStartRequired + } + return false +} + +func (m *ResourcePluginOptions) GetWithTopologyAlignment() bool { + if m != nil { + return m.WithTopologyAlignment + } + return false +} + +func (m *ResourcePluginOptions) GetNeedReconcile() bool { + if m != nil { + return m.NeedReconcile + } + return false +} + +type RegisterRequest struct { + // Version of the API the Resource Plugin was built against + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the unix socket the resource plugin is listening on + // PATH = path.Join(ResourcePluginPath, endpoint) + Endpoint string `protobuf:"bytes,2,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // Schedulable resource name. 
As of now it's expected to be a DNS Label + ResourceName string `protobuf:"bytes,3,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // Options to be communicated with Resource Manager + Options *ResourcePluginOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegisterRequest) Reset() { *m = RegisterRequest{} } +func (*RegisterRequest) ProtoMessage() {} +func (*RegisterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{1} +} +func (m *RegisterRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegisterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegisterRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RegisterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterRequest.Merge(m, src) +} +func (m *RegisterRequest) XXX_Size() int { + return m.Size() +} +func (m *RegisterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterRequest proto.InternalMessageInfo + +func (m *RegisterRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *RegisterRequest) GetEndpoint() string { + if m != nil { + return m.Endpoint + } + return "" +} + +func (m *RegisterRequest) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *RegisterRequest) GetOptions() *ResourcePluginOptions { + if m != nil { + return m.Options + } + return nil +} + +type ResourceRequest struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + PodNamespace string 
`protobuf:"bytes,2,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + ContainerName string `protobuf:"bytes,4,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + ContainerType ContainerType `protobuf:"varint,5,opt,name=container_type,json=containerType,proto3,enum=resourceplugin.v1alpha1.ContainerType" json:"container_type,omitempty"` + ContainerIndex uint64 `protobuf:"varint,6,opt,name=container_index,json=containerIndex,proto3" json:"container_index,omitempty"` + PodRole string `protobuf:"bytes,7,opt,name=pod_role,json=podRole,proto3" json:"pod_role,omitempty"` + PodType string `protobuf:"bytes,8,opt,name=pod_type,json=podType,proto3" json:"pod_type,omitempty"` + ResourceName string `protobuf:"bytes,9,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + Hint *TopologyHint `protobuf:"bytes,10,opt,name=hint,proto3" json:"hint,omitempty"` + ResourceRequests map[string]float64 `protobuf:"bytes,11,rep,name=resource_requests,json=resourceRequests,proto3" json:"resource_requests,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,12,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,13,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceRequest) Reset() { *m = ResourceRequest{} } +func (*ResourceRequest) ProtoMessage() {} +func (*ResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{2} +} +func (m *ResourceRequest) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRequest.Merge(m, src) +} +func (m *ResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *ResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRequest proto.InternalMessageInfo + +func (m *ResourceRequest) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *ResourceRequest) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +func (m *ResourceRequest) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m *ResourceRequest) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +func (m *ResourceRequest) GetContainerType() ContainerType { + if m != nil { + return m.ContainerType + } + return ContainerType_INIT +} + +func (m *ResourceRequest) GetContainerIndex() uint64 { + if m != nil { + return m.ContainerIndex + } + return 0 +} + +func (m *ResourceRequest) GetPodRole() string { + if m != nil { + return m.PodRole + } + return "" +} + +func (m *ResourceRequest) GetPodType() string { + if m != nil { + return m.PodType + } + return "" +} + +func (m *ResourceRequest) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ResourceRequest) GetHint() *TopologyHint { + if m != nil { + return m.Hint + } + return nil +} + +func (m *ResourceRequest) GetResourceRequests() map[string]float64 { + if m != nil { + return m.ResourceRequests + } + return nil +} + +func (m 
*ResourceRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ResourceRequest) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type ResourceHintsResponse struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + PodNamespace string `protobuf:"bytes,2,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + ContainerName string `protobuf:"bytes,4,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + ContainerType ContainerType `protobuf:"varint,5,opt,name=container_type,json=containerType,proto3,enum=resourceplugin.v1alpha1.ContainerType" json:"container_type,omitempty"` + ContainerIndex uint64 `protobuf:"varint,6,opt,name=container_index,json=containerIndex,proto3" json:"container_index,omitempty"` + PodRole string `protobuf:"bytes,7,opt,name=pod_role,json=podRole,proto3" json:"pod_role,omitempty"` + PodType string `protobuf:"bytes,8,opt,name=pod_type,json=podType,proto3" json:"pod_type,omitempty"` + ResourceName string `protobuf:"bytes,9,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + ResourceHints map[string]*ListOfTopologyHints `protobuf:"bytes,10,rep,name=resource_hints,json=resourceHints,proto3" json:"resource_hints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,12,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceHintsResponse) Reset() { *m = ResourceHintsResponse{} } +func (*ResourceHintsResponse) ProtoMessage() {} +func (*ResourceHintsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{3} +} +func (m *ResourceHintsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHintsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceHintsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceHintsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHintsResponse.Merge(m, src) +} +func (m *ResourceHintsResponse) XXX_Size() int { + return m.Size() +} +func (m *ResourceHintsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHintsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHintsResponse proto.InternalMessageInfo + +func (m *ResourceHintsResponse) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *ResourceHintsResponse) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +func (m *ResourceHintsResponse) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m *ResourceHintsResponse) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +func (m *ResourceHintsResponse) GetContainerType() ContainerType { + if m != nil { + return m.ContainerType + } + return ContainerType_INIT +} + +func (m *ResourceHintsResponse) GetContainerIndex() uint64 { + if m != nil { + return m.ContainerIndex + } + return 0 +} + +func (m *ResourceHintsResponse) GetPodRole() string { + if m != nil { + return m.PodRole + } + return "" +} + +func (m *ResourceHintsResponse) GetPodType() string { + if 
m != nil { + return m.PodType + } + return "" +} + +func (m *ResourceHintsResponse) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ResourceHintsResponse) GetResourceHints() map[string]*ListOfTopologyHints { + if m != nil { + return m.ResourceHints + } + return nil +} + +func (m *ResourceHintsResponse) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ResourceHintsResponse) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type ResourceAllocationResponse struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + PodNamespace string `protobuf:"bytes,2,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + ContainerName string `protobuf:"bytes,4,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + ContainerType ContainerType `protobuf:"varint,5,opt,name=container_type,json=containerType,proto3,enum=resourceplugin.v1alpha1.ContainerType" json:"container_type,omitempty"` + ContainerIndex uint64 `protobuf:"varint,6,opt,name=container_index,json=containerIndex,proto3" json:"container_index,omitempty"` + PodRole string `protobuf:"bytes,7,opt,name=pod_role,json=podRole,proto3" json:"pod_role,omitempty"` + PodType string `protobuf:"bytes,8,opt,name=pod_type,json=podType,proto3" json:"pod_type,omitempty"` + ResourceName string `protobuf:"bytes,9,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + AllocationResult *ResourceAllocation `protobuf:"bytes,10,opt,name=allocation_result,json=allocationResult,proto3" json:"allocation_result,omitempty"` + Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,12,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceAllocationResponse) Reset() { *m = ResourceAllocationResponse{} } +func (*ResourceAllocationResponse) ProtoMessage() {} +func (*ResourceAllocationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{4} +} +func (m *ResourceAllocationResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAllocationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAllocationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAllocationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAllocationResponse.Merge(m, src) +} +func (m *ResourceAllocationResponse) XXX_Size() int { + return m.Size() +} +func (m *ResourceAllocationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAllocationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAllocationResponse proto.InternalMessageInfo + +func (m *ResourceAllocationResponse) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *ResourceAllocationResponse) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +func (m *ResourceAllocationResponse) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m *ResourceAllocationResponse) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +func (m *ResourceAllocationResponse) GetContainerType() ContainerType { + if m != nil { 
+ return m.ContainerType + } + return ContainerType_INIT +} + +func (m *ResourceAllocationResponse) GetContainerIndex() uint64 { + if m != nil { + return m.ContainerIndex + } + return 0 +} + +func (m *ResourceAllocationResponse) GetPodRole() string { + if m != nil { + return m.PodRole + } + return "" +} + +func (m *ResourceAllocationResponse) GetPodType() string { + if m != nil { + return m.PodType + } + return "" +} + +func (m *ResourceAllocationResponse) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ResourceAllocationResponse) GetAllocationResult() *ResourceAllocation { + if m != nil { + return m.AllocationResult + } + return nil +} + +func (m *ResourceAllocationResponse) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ResourceAllocationResponse) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type ListOfTopologyHints struct { + Hints []*TopologyHint `protobuf:"bytes,1,rep,name=hints,proto3" json:"hints,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListOfTopologyHints) Reset() { *m = ListOfTopologyHints{} } +func (*ListOfTopologyHints) ProtoMessage() {} +func (*ListOfTopologyHints) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{5} +} +func (m *ListOfTopologyHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListOfTopologyHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListOfTopologyHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListOfTopologyHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOfTopologyHints.Merge(m, src) +} +func (m *ListOfTopologyHints) XXX_Size() int { + return m.Size() +} +func (m 
*ListOfTopologyHints) XXX_DiscardUnknown() { + xxx_messageInfo_ListOfTopologyHints.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOfTopologyHints proto.InternalMessageInfo + +func (m *ListOfTopologyHints) GetHints() []*TopologyHint { + if m != nil { + return m.Hints + } + return nil +} + +type TopologyHint struct { + Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes,proto3" json:"nodes,omitempty"` + Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyHint) Reset() { *m = TopologyHint{} } +func (*TopologyHint) ProtoMessage() {} +func (*TopologyHint) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{6} +} +func (m *TopologyHint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyHint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyHint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyHint) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyHint.Merge(m, src) +} +func (m *TopologyHint) XXX_Size() int { + return m.Size() +} +func (m *TopologyHint) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyHint.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyHint proto.InternalMessageInfo + +func (m *TopologyHint) GetNodes() []uint64 { + if m != nil { + return m.Nodes + } + return nil +} + +func (m *TopologyHint) GetPreferred() bool { + if m != nil { + return m.Preferred + } + return false +} + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{7} +} +func (m 
*Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +type RemovePodRequest struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemovePodRequest) Reset() { *m = RemovePodRequest{} } +func (*RemovePodRequest) ProtoMessage() {} +func (*RemovePodRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{8} +} +func (m *RemovePodRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemovePodRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemovePodRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemovePodRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemovePodRequest.Merge(m, src) +} +func (m *RemovePodRequest) XXX_Size() int { + return m.Size() +} +func (m *RemovePodRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemovePodRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemovePodRequest proto.InternalMessageInfo + +func (m *RemovePodRequest) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +type RemovePodResponse struct { + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemovePodResponse) Reset() { *m = RemovePodResponse{} } +func (*RemovePodResponse) ProtoMessage() {} +func (*RemovePodResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{9} +} +func (m *RemovePodResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemovePodResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemovePodResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemovePodResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemovePodResponse.Merge(m, src) +} +func (m *RemovePodResponse) XXX_Size() int { + return m.Size() +} +func (m *RemovePodResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemovePodResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemovePodResponse proto.InternalMessageInfo + +type GetResourcesAllocationRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResourcesAllocationRequest) Reset() { *m = GetResourcesAllocationRequest{} } +func (*GetResourcesAllocationRequest) ProtoMessage() {} +func (*GetResourcesAllocationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{10} +} +func (m *GetResourcesAllocationRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetResourcesAllocationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetResourcesAllocationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetResourcesAllocationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResourcesAllocationRequest.Merge(m, 
src) +} +func (m *GetResourcesAllocationRequest) XXX_Size() int { + return m.Size() +} +func (m *GetResourcesAllocationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetResourcesAllocationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResourcesAllocationRequest proto.InternalMessageInfo + +type GetResourcesAllocationResponse struct { + PodResources map[string]*ContainerResources `protobuf:"bytes,1,rep,name=pod_resources,json=podResources,proto3" json:"pod_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResourcesAllocationResponse) Reset() { *m = GetResourcesAllocationResponse{} } +func (*GetResourcesAllocationResponse) ProtoMessage() {} +func (*GetResourcesAllocationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{11} +} +func (m *GetResourcesAllocationResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetResourcesAllocationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetResourcesAllocationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetResourcesAllocationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResourcesAllocationResponse.Merge(m, src) +} +func (m *GetResourcesAllocationResponse) XXX_Size() int { + return m.Size() +} +func (m *GetResourcesAllocationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResourcesAllocationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResourcesAllocationResponse proto.InternalMessageInfo + +func (m *GetResourcesAllocationResponse) GetPodResources() map[string]*ContainerResources { + if m != nil { + return m.PodResources + } + return nil +} + +type ContainerResources 
struct { + ContainerResources map[string]*ResourceAllocation `protobuf:"bytes,1,rep,name=container_resources,json=containerResources,proto3" json:"container_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerResources) Reset() { *m = ContainerResources{} } +func (*ContainerResources) ProtoMessage() {} +func (*ContainerResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{12} +} +func (m *ContainerResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResources.Merge(m, src) +} +func (m *ContainerResources) XXX_Size() int { + return m.Size() +} +func (m *ContainerResources) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResources.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResources proto.InternalMessageInfo + +func (m *ContainerResources) GetContainerResources() map[string]*ResourceAllocation { + if m != nil { + return m.ContainerResources + } + return nil +} + +type ResourceAllocation struct { + ResourceAllocation map[string]*ResourceAllocationInfo `protobuf:"bytes,1,rep,name=resource_allocation,json=resourceAllocation,proto3" json:"resource_allocation,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceAllocation) Reset() { *m = ResourceAllocation{} } +func (*ResourceAllocation) ProtoMessage() {} +func 
(*ResourceAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{13} +} +func (m *ResourceAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAllocation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAllocation.Merge(m, src) +} +func (m *ResourceAllocation) XXX_Size() int { + return m.Size() +} +func (m *ResourceAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAllocation.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAllocation proto.InternalMessageInfo + +func (m *ResourceAllocation) GetResourceAllocation() map[string]*ResourceAllocationInfo { + if m != nil { + return m.ResourceAllocation + } + return nil +} + +type ResourceAllocationInfo struct { + OciPropertyName string `protobuf:"bytes,1,opt,name=oci_property_name,json=ociPropertyName,proto3" json:"oci_property_name,omitempty"` + IsNodeResource bool `protobuf:"varint,2,opt,name=is_node_resource,json=isNodeResource,proto3" json:"is_node_resource,omitempty"` + IsScalarResource bool `protobuf:"varint,3,opt,name=is_scalar_resource,json=isScalarResource,proto3" json:"is_scalar_resource,omitempty"` + // only for exclusive resources + AllocatedQuantity float64 `protobuf:"fixed64,4,opt,name=allocated_quantity,json=allocatedQuantity,proto3" json:"allocated_quantity,omitempty"` + AllocationResult string `protobuf:"bytes,5,opt,name=allocation_result,json=allocationResult,proto3" json:"allocation_result,omitempty"` + Envs map[string]string `protobuf:"bytes,6,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations 
map[string]string `protobuf:"bytes,7,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // topology hints correspond to allocation information. + // we need it when kubelet restarts and resources have been allocated. + // - why don't we use GetTopologyAwareResources of qrm to generate hints? + // - for those resources with accompanying resources, + // we can't generate hints of its accompanying resource by its allocation_result. + ResourceHints *ListOfTopologyHints `protobuf:"bytes,8,opt,name=resource_hints,json=resourceHints,proto3" json:"resource_hints,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceAllocationInfo) Reset() { *m = ResourceAllocationInfo{} } +func (*ResourceAllocationInfo) ProtoMessage() {} +func (*ResourceAllocationInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{14} +} +func (m *ResourceAllocationInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAllocationInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAllocationInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAllocationInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAllocationInfo.Merge(m, src) +} +func (m *ResourceAllocationInfo) XXX_Size() int { + return m.Size() +} +func (m *ResourceAllocationInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAllocationInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAllocationInfo proto.InternalMessageInfo + +func (m *ResourceAllocationInfo) GetOciPropertyName() string { + if m != nil { + return m.OciPropertyName + } + return "" +} + +func (m *ResourceAllocationInfo) GetIsNodeResource() bool { + if m != 
nil { + return m.IsNodeResource + } + return false +} + +func (m *ResourceAllocationInfo) GetIsScalarResource() bool { + if m != nil { + return m.IsScalarResource + } + return false +} + +func (m *ResourceAllocationInfo) GetAllocatedQuantity() float64 { + if m != nil { + return m.AllocatedQuantity + } + return 0 +} + +func (m *ResourceAllocationInfo) GetAllocationResult() string { + if m != nil { + return m.AllocationResult + } + return "" +} + +func (m *ResourceAllocationInfo) GetEnvs() map[string]string { + if m != nil { + return m.Envs + } + return nil +} + +func (m *ResourceAllocationInfo) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ResourceAllocationInfo) GetResourceHints() *ListOfTopologyHints { + if m != nil { + return m.ResourceHints + } + return nil +} + +type GetTopologyAwareResourcesRequest struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + ContainerName string `protobuf:"bytes,2,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTopologyAwareResourcesRequest) Reset() { *m = GetTopologyAwareResourcesRequest{} } +func (*GetTopologyAwareResourcesRequest) ProtoMessage() {} +func (*GetTopologyAwareResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{15} +} +func (m *GetTopologyAwareResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTopologyAwareResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTopologyAwareResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTopologyAwareResourcesRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetTopologyAwareResourcesRequest.Merge(m, src) +} +func (m *GetTopologyAwareResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetTopologyAwareResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTopologyAwareResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTopologyAwareResourcesRequest proto.InternalMessageInfo + +func (m *GetTopologyAwareResourcesRequest) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *GetTopologyAwareResourcesRequest) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +type GetTopologyAwareResourcesResponse struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + PodName string `protobuf:"bytes,2,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + PodNamespace string `protobuf:"bytes,3,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + ContainerTopologyAwareResources *ContainerTopologyAwareResources `protobuf:"bytes,4,opt,name=container_topology_aware_resources,json=containerTopologyAwareResources,proto3" json:"container_topology_aware_resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTopologyAwareResourcesResponse) Reset() { *m = GetTopologyAwareResourcesResponse{} } +func (*GetTopologyAwareResourcesResponse) ProtoMessage() {} +func (*GetTopologyAwareResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{16} +} +func (m *GetTopologyAwareResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTopologyAwareResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTopologyAwareResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil + } +} +func (m *GetTopologyAwareResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTopologyAwareResourcesResponse.Merge(m, src) +} +func (m *GetTopologyAwareResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetTopologyAwareResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTopologyAwareResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTopologyAwareResourcesResponse proto.InternalMessageInfo + +func (m *GetTopologyAwareResourcesResponse) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *GetTopologyAwareResourcesResponse) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m *GetTopologyAwareResourcesResponse) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +func (m *GetTopologyAwareResourcesResponse) GetContainerTopologyAwareResources() *ContainerTopologyAwareResources { + if m != nil { + return m.ContainerTopologyAwareResources + } + return nil +} + +type ContainerTopologyAwareResources struct { + ContainerName string `protobuf:"bytes,1,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + AllocatedResources map[string]*TopologyAwareResource `protobuf:"bytes,2,rep,name=allocated_resources,json=allocatedResources,proto3" json:"allocated_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerTopologyAwareResources) Reset() { *m = ContainerTopologyAwareResources{} } +func (*ContainerTopologyAwareResources) ProtoMessage() {} +func (*ContainerTopologyAwareResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{17} +} +func (m *ContainerTopologyAwareResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ContainerTopologyAwareResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerTopologyAwareResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerTopologyAwareResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerTopologyAwareResources.Merge(m, src) +} +func (m *ContainerTopologyAwareResources) XXX_Size() int { + return m.Size() +} +func (m *ContainerTopologyAwareResources) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerTopologyAwareResources.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerTopologyAwareResources proto.InternalMessageInfo + +func (m *ContainerTopologyAwareResources) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +func (m *ContainerTopologyAwareResources) GetAllocatedResources() map[string]*TopologyAwareResource { + if m != nil { + return m.AllocatedResources + } + return nil +} + +type TopologyAwareResource struct { + IsNodeResource bool `protobuf:"varint,1,opt,name=is_node_resource,json=isNodeResource,proto3" json:"is_node_resource,omitempty"` + IsScalarResource bool `protobuf:"varint,2,opt,name=is_scalar_resource,json=isScalarResource,proto3" json:"is_scalar_resource,omitempty"` + AggregatedQuantity float64 `protobuf:"fixed64,3,opt,name=aggregated_quantity,json=aggregatedQuantity,proto3" json:"aggregated_quantity,omitempty"` + OriginalAggregatedQuantity float64 `protobuf:"fixed64,4,opt,name=original_aggregated_quantity,json=originalAggregatedQuantity,proto3" json:"original_aggregated_quantity,omitempty"` + TopologyAwareQuantityList []*TopologyAwareQuantity `protobuf:"bytes,5,rep,name=topology_aware_quantity_list,json=topologyAwareQuantityList,proto3" json:"topology_aware_quantity_list,omitempty"` + OriginalTopologyAwareQuantityList []*TopologyAwareQuantity 
`protobuf:"bytes,6,rep,name=original_topology_aware_quantity_list,json=originalTopologyAwareQuantityList,proto3" json:"original_topology_aware_quantity_list,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyAwareResource) Reset() { *m = TopologyAwareResource{} } +func (*TopologyAwareResource) ProtoMessage() {} +func (*TopologyAwareResource) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{18} +} +func (m *TopologyAwareResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyAwareResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyAwareResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyAwareResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyAwareResource.Merge(m, src) +} +func (m *TopologyAwareResource) XXX_Size() int { + return m.Size() +} +func (m *TopologyAwareResource) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyAwareResource.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyAwareResource proto.InternalMessageInfo + +func (m *TopologyAwareResource) GetIsNodeResource() bool { + if m != nil { + return m.IsNodeResource + } + return false +} + +func (m *TopologyAwareResource) GetIsScalarResource() bool { + if m != nil { + return m.IsScalarResource + } + return false +} + +func (m *TopologyAwareResource) GetAggregatedQuantity() float64 { + if m != nil { + return m.AggregatedQuantity + } + return 0 +} + +func (m *TopologyAwareResource) GetOriginalAggregatedQuantity() float64 { + if m != nil { + return m.OriginalAggregatedQuantity + } + return 0 +} + +func (m *TopologyAwareResource) GetTopologyAwareQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareQuantityList + } + return nil +} + +func (m 
*TopologyAwareResource) GetOriginalTopologyAwareQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.OriginalTopologyAwareQuantityList + } + return nil +} + +type TopologyAwareQuantity struct { + ResourceValue float64 `protobuf:"fixed64,1,opt,name=resource_value,json=resourceValue,proto3" json:"resource_value,omitempty"` + Node uint64 `protobuf:"varint,2,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyAwareQuantity) Reset() { *m = TopologyAwareQuantity{} } +func (*TopologyAwareQuantity) ProtoMessage() {} +func (*TopologyAwareQuantity) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{19} +} +func (m *TopologyAwareQuantity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologyAwareQuantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopologyAwareQuantity.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopologyAwareQuantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyAwareQuantity.Merge(m, src) +} +func (m *TopologyAwareQuantity) XXX_Size() int { + return m.Size() +} +func (m *TopologyAwareQuantity) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyAwareQuantity.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyAwareQuantity proto.InternalMessageInfo + +func (m *TopologyAwareQuantity) GetResourceValue() float64 { + if m != nil { + return m.ResourceValue + } + return 0 +} + +func (m *TopologyAwareQuantity) GetNode() uint64 { + if m != nil { + return m.Node + } + return 0 +} + +type GetTopologyAwareAllocatableResourcesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTopologyAwareAllocatableResourcesRequest) Reset() { + *m = 
GetTopologyAwareAllocatableResourcesRequest{} +} +func (*GetTopologyAwareAllocatableResourcesRequest) ProtoMessage() {} +func (*GetTopologyAwareAllocatableResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{20} +} +func (m *GetTopologyAwareAllocatableResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTopologyAwareAllocatableResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTopologyAwareAllocatableResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTopologyAwareAllocatableResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTopologyAwareAllocatableResourcesRequest.Merge(m, src) +} +func (m *GetTopologyAwareAllocatableResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetTopologyAwareAllocatableResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTopologyAwareAllocatableResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTopologyAwareAllocatableResourcesRequest proto.InternalMessageInfo + +type GetTopologyAwareAllocatableResourcesResponse struct { + AllocatableResources map[string]*AllocatableTopologyAwareResource `protobuf:"bytes,1,rep,name=allocatable_resources,json=allocatableResources,proto3" json:"allocatable_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTopologyAwareAllocatableResourcesResponse) Reset() { + *m = GetTopologyAwareAllocatableResourcesResponse{} +} +func (*GetTopologyAwareAllocatableResourcesResponse) ProtoMessage() {} +func (*GetTopologyAwareAllocatableResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, 
[]int{21} +} +func (m *GetTopologyAwareAllocatableResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTopologyAwareAllocatableResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTopologyAwareAllocatableResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTopologyAwareAllocatableResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTopologyAwareAllocatableResourcesResponse.Merge(m, src) +} +func (m *GetTopologyAwareAllocatableResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetTopologyAwareAllocatableResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTopologyAwareAllocatableResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTopologyAwareAllocatableResourcesResponse proto.InternalMessageInfo + +func (m *GetTopologyAwareAllocatableResourcesResponse) GetAllocatableResources() map[string]*AllocatableTopologyAwareResource { + if m != nil { + return m.AllocatableResources + } + return nil +} + +type AllocatableTopologyAwareResource struct { + IsNodeResource bool `protobuf:"varint,1,opt,name=is_node_resource,json=isNodeResource,proto3" json:"is_node_resource,omitempty"` + IsScalarResource bool `protobuf:"varint,2,opt,name=is_scalar_resource,json=isScalarResource,proto3" json:"is_scalar_resource,omitempty"` + AggregatedAllocatableQuantity float64 `protobuf:"fixed64,3,opt,name=aggregated_allocatable_quantity,json=aggregatedAllocatableQuantity,proto3" json:"aggregated_allocatable_quantity,omitempty"` + TopologyAwareAllocatableQuantityList []*TopologyAwareQuantity `protobuf:"bytes,4,rep,name=topology_aware_allocatable_quantity_list,json=topologyAwareAllocatableQuantityList,proto3" json:"topology_aware_allocatable_quantity_list,omitempty"` + AggregatedCapacityQuantity 
float64 `protobuf:"fixed64,5,opt,name=aggregated_capacity_quantity,json=aggregatedCapacityQuantity,proto3" json:"aggregated_capacity_quantity,omitempty"` + TopologyAwareCapacityQuantityList []*TopologyAwareQuantity `protobuf:"bytes,6,rep,name=topology_aware_capacity_quantity_list,json=topologyAwareCapacityQuantityList,proto3" json:"topology_aware_capacity_quantity_list,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatableTopologyAwareResource) Reset() { *m = AllocatableTopologyAwareResource{} } +func (*AllocatableTopologyAwareResource) ProtoMessage() {} +func (*AllocatableTopologyAwareResource) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{22} +} +func (m *AllocatableTopologyAwareResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatableTopologyAwareResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AllocatableTopologyAwareResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AllocatableTopologyAwareResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatableTopologyAwareResource.Merge(m, src) +} +func (m *AllocatableTopologyAwareResource) XXX_Size() int { + return m.Size() +} +func (m *AllocatableTopologyAwareResource) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatableTopologyAwareResource.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatableTopologyAwareResource proto.InternalMessageInfo + +func (m *AllocatableTopologyAwareResource) GetIsNodeResource() bool { + if m != nil { + return m.IsNodeResource + } + return false +} + +func (m *AllocatableTopologyAwareResource) GetIsScalarResource() bool { + if m != nil { + return m.IsScalarResource + } + return false +} + +func (m *AllocatableTopologyAwareResource) 
GetAggregatedAllocatableQuantity() float64 { + if m != nil { + return m.AggregatedAllocatableQuantity + } + return 0 +} + +func (m *AllocatableTopologyAwareResource) GetTopologyAwareAllocatableQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareAllocatableQuantityList + } + return nil +} + +func (m *AllocatableTopologyAwareResource) GetAggregatedCapacityQuantity() float64 { + if m != nil { + return m.AggregatedCapacityQuantity + } + return 0 +} + +func (m *AllocatableTopologyAwareResource) GetTopologyAwareCapacityQuantityList() []*TopologyAwareQuantity { + if m != nil { + return m.TopologyAwareCapacityQuantityList + } + return nil +} + +// - PreStartContainer is expected to be called before each container start if indicated by plugin during registration phase. +// - PreStartContainer allows kubelet to pass reinitialized resources to containers. +// - PreStartContainer allows Resource Plugin to run resource specific operations on +// the resources requested +type PreStartContainerRequest struct { + PodUid string `protobuf:"bytes,1,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + PodNamespace string `protobuf:"bytes,2,opt,name=pod_namespace,json=podNamespace,proto3" json:"pod_namespace,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + ContainerName string `protobuf:"bytes,4,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreStartContainerRequest) Reset() { *m = PreStartContainerRequest{} } +func (*PreStartContainerRequest) ProtoMessage() {} +func (*PreStartContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{23} +} +func (m *PreStartContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreStartContainerRequest) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_PreStartContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PreStartContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreStartContainerRequest.Merge(m, src) +} +func (m *PreStartContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *PreStartContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PreStartContainerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PreStartContainerRequest proto.InternalMessageInfo + +func (m *PreStartContainerRequest) GetPodUid() string { + if m != nil { + return m.PodUid + } + return "" +} + +func (m *PreStartContainerRequest) GetPodNamespace() string { + if m != nil { + return m.PodNamespace + } + return "" +} + +func (m *PreStartContainerRequest) GetPodName() string { + if m != nil { + return m.PodName + } + return "" +} + +func (m *PreStartContainerRequest) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +// PreStartContainerResponse will be send by plugin in response to PreStartContainerRequest +type PreStartContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreStartContainerResponse) Reset() { *m = PreStartContainerResponse{} } +func (*PreStartContainerResponse) ProtoMessage() {} +func (*PreStartContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{24} +} +func (m *PreStartContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreStartContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PreStartContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil + } +} +func (m *PreStartContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreStartContainerResponse.Merge(m, src) +} +func (m *PreStartContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *PreStartContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PreStartContainerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PreStartContainerResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("resourceplugin.v1alpha1.ContainerType", ContainerType_name, ContainerType_value) + proto.RegisterType((*ResourcePluginOptions)(nil), "resourceplugin.v1alpha1.ResourcePluginOptions") + proto.RegisterType((*RegisterRequest)(nil), "resourceplugin.v1alpha1.RegisterRequest") + proto.RegisterType((*ResourceRequest)(nil), "resourceplugin.v1alpha1.ResourceRequest") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceRequest.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceRequest.LabelsEntry") + proto.RegisterMapType((map[string]float64)(nil), "resourceplugin.v1alpha1.ResourceRequest.ResourceRequestsEntry") + proto.RegisterType((*ResourceHintsResponse)(nil), "resourceplugin.v1alpha1.ResourceHintsResponse") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceHintsResponse.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceHintsResponse.LabelsEntry") + proto.RegisterMapType((map[string]*ListOfTopologyHints)(nil), "resourceplugin.v1alpha1.ResourceHintsResponse.ResourceHintsEntry") + proto.RegisterType((*ResourceAllocationResponse)(nil), "resourceplugin.v1alpha1.ResourceAllocationResponse") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceAllocationResponse.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceAllocationResponse.LabelsEntry") + 
proto.RegisterType((*ListOfTopologyHints)(nil), "resourceplugin.v1alpha1.ListOfTopologyHints") + proto.RegisterType((*TopologyHint)(nil), "resourceplugin.v1alpha1.TopologyHint") + proto.RegisterType((*Empty)(nil), "resourceplugin.v1alpha1.Empty") + proto.RegisterType((*RemovePodRequest)(nil), "resourceplugin.v1alpha1.RemovePodRequest") + proto.RegisterType((*RemovePodResponse)(nil), "resourceplugin.v1alpha1.RemovePodResponse") + proto.RegisterType((*GetResourcesAllocationRequest)(nil), "resourceplugin.v1alpha1.GetResourcesAllocationRequest") + proto.RegisterType((*GetResourcesAllocationResponse)(nil), "resourceplugin.v1alpha1.GetResourcesAllocationResponse") + proto.RegisterMapType((map[string]*ContainerResources)(nil), "resourceplugin.v1alpha1.GetResourcesAllocationResponse.PodResourcesEntry") + proto.RegisterType((*ContainerResources)(nil), "resourceplugin.v1alpha1.ContainerResources") + proto.RegisterMapType((map[string]*ResourceAllocation)(nil), "resourceplugin.v1alpha1.ContainerResources.ContainerResourcesEntry") + proto.RegisterType((*ResourceAllocation)(nil), "resourceplugin.v1alpha1.ResourceAllocation") + proto.RegisterMapType((map[string]*ResourceAllocationInfo)(nil), "resourceplugin.v1alpha1.ResourceAllocation.ResourceAllocationEntry") + proto.RegisterType((*ResourceAllocationInfo)(nil), "resourceplugin.v1alpha1.ResourceAllocationInfo") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceAllocationInfo.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "resourceplugin.v1alpha1.ResourceAllocationInfo.EnvsEntry") + proto.RegisterType((*GetTopologyAwareResourcesRequest)(nil), "resourceplugin.v1alpha1.GetTopologyAwareResourcesRequest") + proto.RegisterType((*GetTopologyAwareResourcesResponse)(nil), "resourceplugin.v1alpha1.GetTopologyAwareResourcesResponse") + proto.RegisterType((*ContainerTopologyAwareResources)(nil), "resourceplugin.v1alpha1.ContainerTopologyAwareResources") + 
proto.RegisterMapType((map[string]*TopologyAwareResource)(nil), "resourceplugin.v1alpha1.ContainerTopologyAwareResources.AllocatedResourcesEntry") + proto.RegisterType((*TopologyAwareResource)(nil), "resourceplugin.v1alpha1.TopologyAwareResource") + proto.RegisterType((*TopologyAwareQuantity)(nil), "resourceplugin.v1alpha1.TopologyAwareQuantity") + proto.RegisterType((*GetTopologyAwareAllocatableResourcesRequest)(nil), "resourceplugin.v1alpha1.GetTopologyAwareAllocatableResourcesRequest") + proto.RegisterType((*GetTopologyAwareAllocatableResourcesResponse)(nil), "resourceplugin.v1alpha1.GetTopologyAwareAllocatableResourcesResponse") + proto.RegisterMapType((map[string]*AllocatableTopologyAwareResource)(nil), "resourceplugin.v1alpha1.GetTopologyAwareAllocatableResourcesResponse.AllocatableResourcesEntry") + proto.RegisterType((*AllocatableTopologyAwareResource)(nil), "resourceplugin.v1alpha1.AllocatableTopologyAwareResource") + proto.RegisterType((*PreStartContainerRequest)(nil), "resourceplugin.v1alpha1.PreStartContainerRequest") + proto.RegisterType((*PreStartContainerResponse)(nil), "resourceplugin.v1alpha1.PreStartContainerResponse") +} + +func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } + +var fileDescriptor_00212fb1f9d3bf1c = []byte{ + // 1864 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xcd, 0x73, 0xd3, 0xda, + 0x15, 0xb7, 0x6c, 0xc7, 0x1f, 0x27, 0x1f, 0x38, 0x37, 0x84, 0x38, 0x6a, 0x70, 0x82, 0x4a, 0xa8, + 0x4b, 0xc0, 0x19, 0x42, 0x07, 0x08, 0x9d, 0x01, 0x4c, 0xe2, 0x42, 0x66, 0x92, 0x90, 0x2a, 0x29, + 0xed, 0xb4, 0x0b, 0x8f, 0x62, 0xdf, 0x38, 0x1a, 0x14, 0x5d, 0x21, 0xc9, 0xa1, 0x9e, 0xe9, 0x82, + 0x32, 0xa5, 0x9b, 0x6e, 0x58, 0x74, 0xd5, 0xe9, 0xb2, 0xff, 0x42, 0xfb, 0x0f, 0x74, 0xc3, 0x74, + 0xd5, 0x55, 0xa7, 0xcb, 0x92, 0xfe, 0x0f, 0xef, 0xcd, 0xdb, 0x30, 0x6f, 0x74, 0xaf, 0x3e, 0x2d, + 0xc9, 0x5f, 0xc0, 0x7b, 0x2c, 0xd8, 0x49, 0xe7, 0x9e, 0x7b, 0xbe, 
0x7f, 0xe7, 0x1e, 0x5d, 0x41, + 0x5e, 0xd2, 0xe4, 0x8a, 0xa6, 0x13, 0x93, 0xa0, 0x39, 0x1d, 0x1b, 0xa4, 0xad, 0x37, 0xb0, 0xa6, + 0xb4, 0x5b, 0xb2, 0x5a, 0x39, 0xbd, 0x21, 0x29, 0xda, 0xb1, 0x74, 0x83, 0xbf, 0xde, 0x92, 0xcd, + 0xe3, 0xf6, 0x61, 0xa5, 0x41, 0x4e, 0x56, 0x5b, 0xa4, 0x45, 0x56, 0x29, 0xff, 0x61, 0xfb, 0x88, + 0xbe, 0xd1, 0x17, 0xfa, 0xc4, 0xe4, 0x08, 0x7f, 0xe3, 0x60, 0x56, 0xb4, 0x45, 0xed, 0x51, 0x51, + 0x4f, 0x34, 0x53, 0x26, 0xaa, 0x81, 0xae, 0x01, 0xd2, 0x74, 0x5c, 0x37, 0x4c, 0x49, 0x37, 0xeb, + 0x3a, 0x7e, 0xde, 0x96, 0x75, 0xdc, 0x2c, 0x72, 0x4b, 0x5c, 0x39, 0x27, 0x16, 0x34, 0x1d, 0xef, + 0x5b, 0x0b, 0xa2, 0x4d, 0x47, 0xb7, 0x60, 0xee, 0x85, 0x6c, 0x1e, 0xd7, 0x4d, 0xa2, 0x11, 0x85, + 0xb4, 0x3a, 0x75, 0x49, 0x91, 0x5b, 0xea, 0x09, 0x56, 0xcd, 0x62, 0x92, 0x6e, 0x99, 0xb5, 0x96, + 0x0f, 0xec, 0xd5, 0xaa, 0xb3, 0x88, 0x96, 0x61, 0x4a, 0xc5, 0xb8, 0x59, 0xd7, 0x71, 0x83, 0xa8, + 0x0d, 0x59, 0xc1, 0xc5, 0x14, 0x65, 0x9f, 0xb4, 0xa8, 0xa2, 0x43, 0x14, 0xfe, 0xc1, 0xc1, 0x39, + 0x11, 0xb7, 0x64, 0xc3, 0xc4, 0xba, 0xa5, 0x13, 0x1b, 0x26, 0x2a, 0x42, 0xf6, 0x14, 0xeb, 0x86, + 0x4c, 0x54, 0x6a, 0x55, 0x5e, 0x74, 0x5e, 0x11, 0x0f, 0x39, 0xac, 0x36, 0x35, 0x22, 0xdb, 0xda, + 0xf3, 0xa2, 0xfb, 0x8e, 0x7e, 0x08, 0x93, 0x4e, 0xe8, 0xea, 0xaa, 0x74, 0xc2, 0xf4, 0xe5, 0xc5, + 0x09, 0x87, 0xb8, 0x2b, 0x9d, 0x60, 0xf4, 0x18, 0xb2, 0x84, 0x85, 0xa1, 0x98, 0x5e, 0xe2, 0xca, + 0xe3, 0x6b, 0x95, 0x4a, 0x4c, 0xbc, 0x2b, 0x91, 0xc1, 0x13, 0x9d, 0xed, 0xc2, 0xd7, 0x19, 0xcb, + 0x70, 0xc6, 0xe2, 0x18, 0x3e, 0x07, 0x59, 0x8d, 0x34, 0xeb, 0x6d, 0xb9, 0x69, 0x1b, 0x9e, 0xd1, + 0x48, 0xf3, 0x17, 0x72, 0xd3, 0xb2, 0xcd, 0x5a, 0xb0, 0xcc, 0x32, 0x34, 0xa9, 0x81, 0x6d, 0xe3, + 0x27, 0x34, 0xd2, 0xdc, 0x75, 0x68, 0x68, 0x1e, 0x72, 0x0e, 0x93, 0x6d, 0x7b, 0xd6, 0x5e, 0xb7, + 0x82, 0xd9, 0x20, 0xaa, 0x29, 0xc9, 0x2a, 0xd6, 0x19, 0x43, 0x9a, 0x32, 0x4c, 0xba, 0x54, 0xca, + 0xb6, 0xe3, 0x67, 0x33, 0x3b, 0x1a, 0x2e, 0x8e, 0x2d, 0x71, 0xe5, 0xa9, 0xb5, 0x2b, 0xb1, 0x4e, + 0x6e, 
0x38, 0xec, 0x07, 0x1d, 0x0d, 0xfb, 0xc4, 0x59, 0xaf, 0xe8, 0x47, 0x70, 0xce, 0x13, 0x27, + 0xab, 0x4d, 0xfc, 0xdb, 0x62, 0x66, 0x89, 0x2b, 0xa7, 0x45, 0x4f, 0xcb, 0x96, 0x45, 0x75, 0x2c, + 0xd7, 0x89, 0x82, 0x8b, 0x59, 0xd7, 0x72, 0x91, 0x28, 0xae, 0x53, 0xd4, 0x98, 0x9c, 0xbb, 0x44, + 0xc5, 0x87, 0x12, 0x96, 0x8f, 0x48, 0xd8, 0x3a, 0xa4, 0x8f, 0xad, 0x6c, 0x03, 0xcd, 0xd6, 0x72, + 0xac, 0x23, 0x4e, 0x01, 0x3e, 0x96, 0x55, 0x53, 0xa4, 0x5b, 0xd0, 0x33, 0x98, 0x76, 0xe5, 0xeb, + 0x2c, 0x43, 0x46, 0x71, 0x7c, 0x29, 0x55, 0x1e, 0x5f, 0xbb, 0xd7, 0x37, 0xeb, 0x76, 0x4a, 0xbb, + 0xdf, 0x8d, 0x9a, 0x6a, 0xea, 0x1d, 0xb1, 0xa0, 0x77, 0x91, 0xd1, 0x36, 0x64, 0x14, 0xe9, 0x10, + 0x2b, 0x46, 0x71, 0x82, 0x6a, 0xf8, 0xc9, 0xc0, 0x1a, 0xb6, 0xe9, 0x36, 0x26, 0xd7, 0x96, 0x81, + 0x7e, 0x03, 0xe3, 0x92, 0xaa, 0x12, 0x53, 0x62, 0xa5, 0x3a, 0x49, 0x45, 0xae, 0x0f, 0x2c, 0xb2, + 0xea, 0xed, 0x65, 0x72, 0xfd, 0xd2, 0xf8, 0x0d, 0xaf, 0x31, 0x04, 0xbc, 0x42, 0x05, 0x48, 0x3d, + 0xc3, 0x1d, 0xbb, 0x74, 0xad, 0x47, 0x74, 0x1e, 0xc6, 0x4e, 0x25, 0xa5, 0xcd, 0xea, 0x95, 0x13, + 0xd9, 0xcb, 0xdd, 0xe4, 0x1d, 0x8e, 0x5f, 0x87, 0x71, 0x9f, 0xe1, 0xfd, 0xb6, 0xe6, 0xfd, 0x5b, + 0xef, 0x41, 0xa1, 0xdb, 0xc0, 0x61, 0xf6, 0x0b, 0x5f, 0x65, 0x3c, 0x07, 0xac, 0x74, 0x1b, 0x22, + 0x36, 0x34, 0xa2, 0x1a, 0xf8, 0x0b, 0xfe, 0x3e, 0x3e, 0xfe, 0x8e, 0x61, 0xca, 0x65, 0xb2, 0x50, + 0x65, 0x14, 0x81, 0x16, 0x63, 0xb5, 0x6f, 0x31, 0x06, 0x52, 0x13, 0xa4, 0xb2, 0xa2, 0x74, 0xb5, + 0x53, 0x1a, 0x12, 0x5d, 0x04, 0x31, 0x8c, 0xde, 0x1d, 0x52, 0x43, 0x14, 0x8e, 0xa4, 0x20, 0x8e, + 0x18, 0x34, 0xef, 0x0f, 0x29, 0xb8, 0x37, 0x9a, 0x54, 0x40, 0x61, 0xdf, 0x22, 0xea, 0xf9, 0xa1, + 0xbf, 0x9e, 0xc7, 0xd7, 0xae, 0xc5, 0x1a, 0xb1, 0x2d, 0x1b, 0xe6, 0x93, 0x23, 0x7f, 0x3f, 0x33, + 0x3e, 0x13, 0xe0, 0xbd, 0xce, 0x00, 0xef, 0xf8, 0x5a, 0x55, 0x14, 0xd2, 0xa0, 0x82, 0xbe, 0xa0, + 0xef, 0xd3, 0xa1, 0xef, 0x57, 0x30, 0x2d, 0xb9, 0x81, 0xae, 0xeb, 0xd8, 0x68, 0x2b, 0xce, 0x51, + 0xb8, 0xd2, 0xb7, 0x8a, 0x7d, 0x29, 0x2a, 
0x48, 0xfe, 0x74, 0xb5, 0x15, 0x13, 0xfd, 0xb2, 0x0b, + 0x6d, 0xf7, 0x87, 0x11, 0xd7, 0x0b, 0x72, 0x47, 0x51, 0x90, 0xdb, 0x1c, 0x45, 0x7a, 0x6f, 0xdc, + 0x7d, 0x8f, 0x38, 0x10, 0x61, 0x26, 0x02, 0xa4, 0xe8, 0xa7, 0x30, 0xc6, 0x3a, 0x24, 0x47, 0x7d, + 0x1e, 0x70, 0x56, 0x61, 0x7b, 0x84, 0x87, 0x30, 0xe1, 0x27, 0x5b, 0xda, 0x55, 0xd2, 0xc4, 0x4c, + 0x58, 0x5a, 0x64, 0x2f, 0x68, 0x01, 0xf2, 0x9a, 0x8e, 0x8f, 0xb0, 0x6e, 0x4d, 0xec, 0x6c, 0xfc, + 0xf6, 0x08, 0x42, 0x16, 0xc6, 0x6a, 0x27, 0x9a, 0xd9, 0x11, 0x56, 0xa0, 0x20, 0xe2, 0x13, 0x72, + 0x8a, 0xf7, 0x48, 0xb3, 0xdf, 0x6c, 0x2a, 0xcc, 0xc0, 0xb4, 0x8f, 0x99, 0xc5, 0x5e, 0x58, 0x84, + 0x8b, 0x8f, 0xb0, 0xe9, 0x24, 0xc7, 0xf0, 0x67, 0x87, 0x8a, 0x13, 0xde, 0x73, 0x50, 0x8a, 0xe3, + 0xb0, 0xfb, 0x81, 0xca, 0x60, 0xef, 0x44, 0xc1, 0x89, 0xcb, 0x56, 0x6c, 0x5c, 0x7a, 0xcb, 0xab, + 0x30, 0xfb, 0xd8, 0x32, 0x2b, 0x08, 0xab, 0x83, 0xb8, 0x24, 0x5e, 0x81, 0xe9, 0x10, 0x4b, 0x44, + 0x5e, 0xab, 0xc1, 0x46, 0xbc, 0xd2, 0xbf, 0x3b, 0xb8, 0x22, 0xfd, 0x45, 0xf0, 0x9e, 0x03, 0x14, + 0xe6, 0x40, 0x26, 0xcc, 0x78, 0x5d, 0xa3, 0xdb, 0xf5, 0x8d, 0x21, 0x74, 0x45, 0x90, 0x98, 0xd3, + 0xa8, 0x11, 0x5a, 0xe0, 0x75, 0x98, 0x8b, 0x61, 0xff, 0x90, 0x00, 0x44, 0x60, 0xd3, 0x17, 0x80, + 0x57, 0x49, 0xef, 0xe4, 0xf3, 0x38, 0xac, 0x00, 0xb8, 0x7d, 0xcd, 0xeb, 0x3a, 0x7d, 0x03, 0x10, + 0x96, 0x14, 0x41, 0xb2, 0x03, 0xa0, 0x87, 0x16, 0xf8, 0x53, 0x98, 0x8b, 0x61, 0x8f, 0x08, 0x40, + 0x2d, 0x18, 0x80, 0xd5, 0x21, 0x8c, 0xda, 0x52, 0x8f, 0x88, 0x3f, 0x08, 0xdf, 0xa4, 0xe1, 0x42, + 0x34, 0x17, 0xba, 0x0a, 0xd3, 0xa4, 0x21, 0xd7, 0x35, 0x9d, 0x68, 0x58, 0x37, 0x3b, 0xac, 0xc9, + 0x33, 0x2b, 0xce, 0x91, 0x86, 0xbc, 0x67, 0xd3, 0x69, 0x9f, 0x2f, 0x43, 0x41, 0x36, 0xea, 0x16, + 0xc6, 0xdd, 0x9a, 0xb1, 0xe1, 0x3d, 0x25, 0x1b, 0xbb, 0xa4, 0x89, 0x1d, 0x1d, 0xd6, 0xc7, 0xbb, + 0x6c, 0xd4, 0x8d, 0x86, 0xa4, 0x48, 0x5e, 0x7d, 0xd9, 0x9f, 0xd6, 0x05, 0xd9, 0xd8, 0xa7, 0x0b, + 0x2e, 0xf7, 0x75, 0x40, 0x76, 0x0e, 0x70, 0xb3, 0xfe, 0xbc, 0x2d, 0xa9, 0xa6, 
0x6c, 0x76, 0xe8, + 0xe9, 0xc9, 0x89, 0xd3, 0xee, 0xca, 0xcf, 0xed, 0x05, 0xb4, 0x12, 0x75, 0xdc, 0x8c, 0x51, 0x93, + 0xc3, 0x27, 0xc8, 0x0e, 0xa4, 0xb1, 0x7a, 0x6a, 0x14, 0x33, 0x03, 0x7e, 0x9c, 0x04, 0xc3, 0x53, + 0xa9, 0xa9, 0xa7, 0x76, 0x41, 0x53, 0x31, 0xe8, 0x30, 0x78, 0x6e, 0x64, 0xa9, 0xd4, 0x07, 0xc3, + 0x4a, 0xed, 0x79, 0x66, 0xa0, 0xfd, 0xd0, 0x30, 0x9b, 0x1b, 0x61, 0x18, 0x0b, 0xce, 0xad, 0xfc, + 0x6d, 0xc8, 0xbb, 0xbe, 0x7c, 0xa7, 0xc7, 0xd0, 0x21, 0x2c, 0x3d, 0xc2, 0xa6, 0x7b, 0xf3, 0xf2, + 0x42, 0xd2, 0xb1, 0xd7, 0xa9, 0xfa, 0xdd, 0x48, 0x84, 0x67, 0xaa, 0x64, 0xc4, 0x4c, 0x25, 0xfc, + 0x21, 0x09, 0x97, 0x7a, 0x28, 0xe9, 0x37, 0xf9, 0xf9, 0x87, 0xba, 0x64, 0x70, 0xa8, 0x0b, 0x0d, + 0x85, 0xa9, 0x88, 0xa1, 0xf0, 0x35, 0x07, 0x82, 0x6f, 0xa6, 0x73, 0xaf, 0xa0, 0x2c, 0x33, 0x7c, + 0xdd, 0x95, 0x5d, 0xe5, 0xdc, 0x19, 0x60, 0xce, 0x8b, 0xf6, 0x63, 0xb1, 0xd1, 0x9b, 0x41, 0xf8, + 0x67, 0x12, 0x16, 0xfb, 0x08, 0x89, 0x88, 0x28, 0x17, 0x35, 0xa5, 0xfe, 0x9e, 0x83, 0x19, 0x0f, + 0x93, 0x9e, 0x0f, 0x49, 0x5a, 0xf0, 0x7b, 0xa3, 0xfa, 0x50, 0xa9, 0x3a, 0x32, 0xbb, 0x8f, 0x0b, + 0x29, 0xb4, 0xc0, 0xb7, 0x61, 0x2e, 0x86, 0x3d, 0xa2, 0x00, 0x37, 0x83, 0xdd, 0xb2, 0xd2, 0x77, + 0xac, 0x09, 0x18, 0xe6, 0x2f, 0xd8, 0xff, 0xa4, 0x60, 0x36, 0x92, 0x29, 0xb2, 0xff, 0x71, 0x43, + 0xf4, 0xbf, 0x64, 0x4c, 0xff, 0x5b, 0x85, 0x19, 0xa9, 0xd5, 0xd2, 0x71, 0x2b, 0xd8, 0x00, 0x53, + 0xb4, 0x01, 0x22, 0x6f, 0xc9, 0xed, 0x80, 0x0f, 0x60, 0x81, 0xe8, 0x72, 0x4b, 0x56, 0x25, 0xa5, + 0x1e, 0xb5, 0x93, 0xb5, 0x4e, 0xde, 0xe1, 0xa9, 0x86, 0x25, 0x10, 0x58, 0xe8, 0xaa, 0x53, 0x67, + 0x73, 0x5d, 0x91, 0x0d, 0xab, 0x9d, 0xa6, 0x06, 0x8f, 0xa2, 0x23, 0x55, 0x9c, 0x37, 0xa3, 0xc8, + 0x56, 0x77, 0x42, 0x2f, 0x39, 0x58, 0x76, 0x6d, 0xee, 0xa9, 0x3a, 0x33, 0x92, 0xea, 0x4b, 0x8e, + 0xf0, 0x83, 0x38, 0x13, 0x04, 0xb1, 0x2b, 0xaf, 0x6e, 0x30, 0x96, 0x7d, 0x0d, 0x97, 0x15, 0x11, + 0x47, 0x03, 0xe8, 0xb6, 0xd0, 0xa7, 0x16, 0x11, 0x21, 0x48, 0x5b, 0xb9, 0xa7, 0x69, 0x4c, 0x8b, + 0xf4, 0x59, 0xb8, 
0x0e, 0x2b, 0xdd, 0x8d, 0xc7, 0xae, 0x59, 0xe9, 0x50, 0x09, 0x35, 0x3a, 0xe1, + 0x5f, 0x49, 0xb8, 0x36, 0x18, 0xbf, 0xdd, 0xb3, 0xfe, 0xcc, 0xc1, 0xac, 0xe4, 0x31, 0x84, 0x66, + 0xb5, 0x7a, 0xaf, 0x31, 0x75, 0x60, 0x35, 0x95, 0xa8, 0x45, 0x06, 0xcc, 0xf3, 0x52, 0xc4, 0x12, + 0xff, 0x8a, 0x83, 0xf9, 0xd8, 0x3d, 0x11, 0xe8, 0x7c, 0x12, 0x44, 0x67, 0xfc, 0x31, 0xec, 0x13, + 0xda, 0x17, 0xa8, 0x6f, 0xd2, 0xb0, 0xd4, 0x8f, 0xff, 0x93, 0x61, 0xf6, 0x67, 0xb0, 0xe8, 0x43, + 0x9e, 0x3f, 0x45, 0x5d, 0xf8, 0xbd, 0xe8, 0xb1, 0xf9, 0x8c, 0x75, 0x6b, 0xef, 0x8f, 0x1c, 0x94, + 0xbb, 0xe0, 0x10, 0x25, 0x8c, 0x41, 0x23, 0x3d, 0x12, 0x34, 0x2e, 0x9b, 0x31, 0x75, 0x10, 0x00, + 0xe8, 0x03, 0x58, 0xf0, 0x39, 0xd4, 0x90, 0x34, 0xa9, 0x61, 0xe9, 0x75, 0xbd, 0x19, 0x63, 0x3d, + 0xc5, 0xe3, 0xd9, 0xb0, 0x59, 0x5c, 0x57, 0x2c, 0x88, 0x77, 0xb9, 0x12, 0x12, 0xf3, 0x41, 0x10, + 0x0f, 0xf8, 0xd1, 0xad, 0x9e, 0x42, 0xfc, 0x2f, 0x1c, 0x14, 0xf7, 0xec, 0x7f, 0x43, 0xbe, 0x4f, + 0x8d, 0xcf, 0xe2, 0xbf, 0x87, 0xf0, 0x03, 0x98, 0x8f, 0xb0, 0x8d, 0x21, 0xf0, 0xea, 0x7d, 0x98, + 0x0c, 0xdc, 0xf3, 0xa0, 0x1c, 0xa4, 0xb7, 0x76, 0xb7, 0x0e, 0x0a, 0x09, 0xeb, 0x69, 0xa7, 0xba, + 0xb5, 0x5b, 0xe0, 0xd0, 0x38, 0x64, 0xf7, 0xb7, 0x36, 0x6b, 0x1b, 0x55, 0xb1, 0x90, 0x44, 0x93, + 0x90, 0xaf, 0xed, 0x3d, 0xae, 0xed, 0xd4, 0xc4, 0xea, 0x76, 0x21, 0xb5, 0x76, 0x04, 0x13, 0xec, + 0x0f, 0x95, 0xce, 0xbe, 0x70, 0x9e, 0x42, 0xce, 0xf9, 0x63, 0x85, 0xca, 0x3d, 0x06, 0xd4, 0xc0, + 0x4f, 0x2d, 0xbe, 0x14, 0xcb, 0xc9, 0xbe, 0xd9, 0x13, 0x6b, 0x7f, 0xcd, 0xc1, 0x54, 0xf0, 0xa7, + 0x13, 0x52, 0xa1, 0xe0, 0xeb, 0x36, 0xec, 0x9a, 0xa1, 0x3c, 0xe8, 0x6f, 0x00, 0xbe, 0x32, 0xdc, + 0x45, 0xa7, 0x90, 0x40, 0x4d, 0xc8, 0xbb, 0x77, 0x01, 0xe8, 0xc7, 0x3d, 0xb6, 0x07, 0x2f, 0x17, + 0xf8, 0xab, 0x83, 0xb0, 0xba, 0x5a, 0xfe, 0xc4, 0xc1, 0x85, 0xe8, 0x6f, 0x7d, 0x74, 0x6b, 0xe8, + 0xcb, 0x01, 0x66, 0xc0, 0xed, 0x11, 0x2f, 0x15, 0x84, 0x84, 0x75, 0x10, 0xcc, 0xc7, 0x8e, 0xb8, + 0x68, 0x7d, 0xe0, 0x63, 0xa0, 0xfb, 0x48, 0xe2, 0xef, 
0x8e, 0xb2, 0xd5, 0x35, 0xeb, 0xef, 0x1c, + 0x5c, 0x1e, 0xe4, 0xa4, 0x41, 0x9b, 0x1f, 0x78, 0x50, 0x31, 0x63, 0x6b, 0x1f, 0xe5, 0xb8, 0x13, + 0x12, 0x48, 0x81, 0xa2, 0x2f, 0xe4, 0xc1, 0x3f, 0xcf, 0x7d, 0x30, 0xc0, 0x0f, 0xf9, 0x33, 0x56, + 0x48, 0xa0, 0x67, 0x90, 0x73, 0x26, 0xd9, 0x21, 0x80, 0x71, 0x73, 0x84, 0xeb, 0x48, 0x21, 0x81, + 0x7e, 0x07, 0xd3, 0xa1, 0x36, 0x83, 0x6e, 0xc4, 0xca, 0x8a, 0x6b, 0x97, 0xfc, 0xda, 0x30, 0x5b, + 0x1c, 0xed, 0x0f, 0xaf, 0xbc, 0x7d, 0x57, 0xe2, 0xfe, 0xfb, 0xae, 0x94, 0x78, 0x79, 0x56, 0xe2, + 0xde, 0x9e, 0x95, 0xb8, 0x7f, 0x9f, 0x95, 0xb8, 0xff, 0x9d, 0x95, 0xb8, 0x37, 0xff, 0x2f, 0x25, + 0x7e, 0x9d, 0x73, 0x64, 0x1c, 0x66, 0xe8, 0xff, 0xff, 0x9b, 0xdf, 0x06, 0x00, 0x00, 0xff, 0xff, + 0xd1, 0x90, 0x91, 0x63, 0x54, 0x20, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RegistrationClient is the client API for Registration service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RegistrationClient interface { + Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*Empty, error) +} + +type registrationClient struct { + cc *grpc.ClientConn +} + +func NewRegistrationClient(cc *grpc.ClientConn) RegistrationClient { + return ®istrationClient{cc} +} + +func (c *registrationClient) Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.Registration/Register", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RegistrationServer is the server API for Registration service. 
+type RegistrationServer interface { + Register(context.Context, *RegisterRequest) (*Empty, error) +} + +// UnimplementedRegistrationServer can be embedded to have forward compatible implementations. +type UnimplementedRegistrationServer struct { +} + +func (*UnimplementedRegistrationServer) Register(ctx context.Context, req *RegisterRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Register not implemented") +} + +func RegisterRegistrationServer(s *grpc.Server, srv RegistrationServer) { + s.RegisterService(&_Registration_serviceDesc, srv) +} + +func _Registration_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegisterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationServer).Register(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.Registration/Register", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationServer).Register(ctx, req.(*RegisterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Registration_serviceDesc = grpc.ServiceDesc{ + ServiceName: "resourceplugin.v1alpha1.Registration", + HandlerType: (*RegistrationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Register", + Handler: _Registration_Register_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +// ResourcePluginClient is the client API for ResourcePlugin service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ResourcePluginClient interface { + // GetTopologyHints returns hints of corresponding resources + GetTopologyHints(ctx context.Context, in *ResourceRequest, opts ...grpc.CallOption) (*ResourceHintsResponse, error) + // Notify the resource plugin that the pod has beed deleted, + // and the plugin should do some clear-up work. + RemovePod(ctx context.Context, in *RemovePodRequest, opts ...grpc.CallOption) (*RemovePodResponse, error) + // GetResourcesAllocation returns allocation results of corresponding resources + GetResourcesAllocation(ctx context.Context, in *GetResourcesAllocationRequest, opts ...grpc.CallOption) (*GetResourcesAllocationResponse, error) + // GetTopologyAwareResources returns allocation results of corresponding resources as topology aware format + GetTopologyAwareResources(ctx context.Context, in *GetTopologyAwareResourcesRequest, opts ...grpc.CallOption) (*GetTopologyAwareResourcesResponse, error) + // GetTopologyAwareResources returns corresponding allocatable resources as topology aware format + GetTopologyAwareAllocatableResources(ctx context.Context, in *GetTopologyAwareAllocatableResourcesRequest, opts ...grpc.CallOption) (*GetTopologyAwareAllocatableResourcesResponse, error) + // GetResourcePluginOptions returns options to be communicated with Resource + // Manager + GetResourcePluginOptions(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ResourcePluginOptions, error) + // Allocate is called during pod admit so that the resource + // plugin can allocate corresponding resource for the container + // according to resource request + Allocate(ctx context.Context, in *ResourceRequest, opts ...grpc.CallOption) (*ResourceAllocationResponse, error) + // PreStartContainer is called, if indicated by resource plugin during registeration phase, + // before each container start. 
Resource plugin can run resource specific operations + // such as resetting the resource before making resources available to the container + PreStartContainer(ctx context.Context, in *PreStartContainerRequest, opts ...grpc.CallOption) (*PreStartContainerResponse, error) +} + +type resourcePluginClient struct { + cc *grpc.ClientConn +} + +func NewResourcePluginClient(cc *grpc.ClientConn) ResourcePluginClient { + return &resourcePluginClient{cc} +} + +func (c *resourcePluginClient) GetTopologyHints(ctx context.Context, in *ResourceRequest, opts ...grpc.CallOption) (*ResourceHintsResponse, error) { + out := new(ResourceHintsResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/GetTopologyHints", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) RemovePod(ctx context.Context, in *RemovePodRequest, opts ...grpc.CallOption) (*RemovePodResponse, error) { + out := new(RemovePodResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/RemovePod", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) GetResourcesAllocation(ctx context.Context, in *GetResourcesAllocationRequest, opts ...grpc.CallOption) (*GetResourcesAllocationResponse, error) { + out := new(GetResourcesAllocationResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/GetResourcesAllocation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) GetTopologyAwareResources(ctx context.Context, in *GetTopologyAwareResourcesRequest, opts ...grpc.CallOption) (*GetTopologyAwareResourcesResponse, error) { + out := new(GetTopologyAwareResourcesResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/GetTopologyAwareResources", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) GetTopologyAwareAllocatableResources(ctx context.Context, in *GetTopologyAwareAllocatableResourcesRequest, opts ...grpc.CallOption) (*GetTopologyAwareAllocatableResourcesResponse, error) { + out := new(GetTopologyAwareAllocatableResourcesResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/GetTopologyAwareAllocatableResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) GetResourcePluginOptions(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ResourcePluginOptions, error) { + out := new(ResourcePluginOptions) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/GetResourcePluginOptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) Allocate(ctx context.Context, in *ResourceRequest, opts ...grpc.CallOption) (*ResourceAllocationResponse, error) { + out := new(ResourceAllocationResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/Allocate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePluginClient) PreStartContainer(ctx context.Context, in *PreStartContainerRequest, opts ...grpc.CallOption) (*PreStartContainerResponse, error) { + out := new(PreStartContainerResponse) + err := c.cc.Invoke(ctx, "/resourceplugin.v1alpha1.ResourcePlugin/PreStartContainer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ResourcePluginServer is the server API for ResourcePlugin service. +type ResourcePluginServer interface { + // GetTopologyHints returns hints of corresponding resources + GetTopologyHints(context.Context, *ResourceRequest) (*ResourceHintsResponse, error) + // Notify the resource plugin that the pod has been deleted, + // and the plugin should do some cleanup work. 
+ RemovePod(context.Context, *RemovePodRequest) (*RemovePodResponse, error) + // GetResourcesAllocation returns allocation results of corresponding resources + GetResourcesAllocation(context.Context, *GetResourcesAllocationRequest) (*GetResourcesAllocationResponse, error) + // GetTopologyAwareResources returns allocation results of corresponding resources as topology aware format + GetTopologyAwareResources(context.Context, *GetTopologyAwareResourcesRequest) (*GetTopologyAwareResourcesResponse, error) + // GetTopologyAwareAllocatableResources returns corresponding allocatable resources as topology aware format + GetTopologyAwareAllocatableResources(context.Context, *GetTopologyAwareAllocatableResourcesRequest) (*GetTopologyAwareAllocatableResourcesResponse, error) + // GetResourcePluginOptions returns options to be communicated with Resource + // Manager + GetResourcePluginOptions(context.Context, *Empty) (*ResourcePluginOptions, error) + // Allocate is called during pod admit so that the resource + // plugin can allocate corresponding resource for the container + // according to resource request + Allocate(context.Context, *ResourceRequest) (*ResourceAllocationResponse, error) + // PreStartContainer is called, if indicated by resource plugin during registration phase, + // before each container start. Resource plugin can run resource specific operations + // such as resetting the resource before making resources available to the container + PreStartContainer(context.Context, *PreStartContainerRequest) (*PreStartContainerResponse, error) +} + +// UnimplementedResourcePluginServer can be embedded to have forward compatible implementations. 
+type UnimplementedResourcePluginServer struct { +} + +func (*UnimplementedResourcePluginServer) GetTopologyHints(ctx context.Context, req *ResourceRequest) (*ResourceHintsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopologyHints not implemented") +} +func (*UnimplementedResourcePluginServer) RemovePod(ctx context.Context, req *RemovePodRequest) (*RemovePodResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemovePod not implemented") +} +func (*UnimplementedResourcePluginServer) GetResourcesAllocation(ctx context.Context, req *GetResourcesAllocationRequest) (*GetResourcesAllocationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetResourcesAllocation not implemented") +} +func (*UnimplementedResourcePluginServer) GetTopologyAwareResources(ctx context.Context, req *GetTopologyAwareResourcesRequest) (*GetTopologyAwareResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopologyAwareResources not implemented") +} +func (*UnimplementedResourcePluginServer) GetTopologyAwareAllocatableResources(ctx context.Context, req *GetTopologyAwareAllocatableResourcesRequest) (*GetTopologyAwareAllocatableResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopologyAwareAllocatableResources not implemented") +} +func (*UnimplementedResourcePluginServer) GetResourcePluginOptions(ctx context.Context, req *Empty) (*ResourcePluginOptions, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetResourcePluginOptions not implemented") +} +func (*UnimplementedResourcePluginServer) Allocate(ctx context.Context, req *ResourceRequest) (*ResourceAllocationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Allocate not implemented") +} +func (*UnimplementedResourcePluginServer) PreStartContainer(ctx context.Context, req *PreStartContainerRequest) (*PreStartContainerResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method PreStartContainer not implemented") +} + +func RegisterResourcePluginServer(s *grpc.Server, srv ResourcePluginServer) { + s.RegisterService(&_ResourcePlugin_serviceDesc, srv) +} + +func _ResourcePlugin_GetTopologyHints_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).GetTopologyHints(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/GetTopologyHints", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).GetTopologyHints(ctx, req.(*ResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePlugin_RemovePod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemovePodRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).RemovePod(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/RemovePod", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).RemovePod(ctx, req.(*RemovePodRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePlugin_GetResourcesAllocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourcesAllocationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).GetResourcesAllocation(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/GetResourcesAllocation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).GetResourcesAllocation(ctx, req.(*GetResourcesAllocationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePlugin_GetTopologyAwareResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopologyAwareResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).GetTopologyAwareResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/GetTopologyAwareResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).GetTopologyAwareResources(ctx, req.(*GetTopologyAwareResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePlugin_GetTopologyAwareAllocatableResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopologyAwareAllocatableResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).GetTopologyAwareAllocatableResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/GetTopologyAwareAllocatableResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).GetTopologyAwareAllocatableResources(ctx, req.(*GetTopologyAwareAllocatableResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ResourcePlugin_GetResourcePluginOptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).GetResourcePluginOptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/GetResourcePluginOptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).GetResourcePluginOptions(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePlugin_Allocate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).Allocate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/Allocate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).Allocate(ctx, req.(*ResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePlugin_PreStartContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PreStartContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePluginServer).PreStartContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/resourceplugin.v1alpha1.ResourcePlugin/PreStartContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePluginServer).PreStartContainer(ctx, 
req.(*PreStartContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourcePlugin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "resourceplugin.v1alpha1.ResourcePlugin", + HandlerType: (*ResourcePluginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetTopologyHints", + Handler: _ResourcePlugin_GetTopologyHints_Handler, + }, + { + MethodName: "RemovePod", + Handler: _ResourcePlugin_RemovePod_Handler, + }, + { + MethodName: "GetResourcesAllocation", + Handler: _ResourcePlugin_GetResourcesAllocation_Handler, + }, + { + MethodName: "GetTopologyAwareResources", + Handler: _ResourcePlugin_GetTopologyAwareResources_Handler, + }, + { + MethodName: "GetTopologyAwareAllocatableResources", + Handler: _ResourcePlugin_GetTopologyAwareAllocatableResources_Handler, + }, + { + MethodName: "GetResourcePluginOptions", + Handler: _ResourcePlugin_GetResourcePluginOptions_Handler, + }, + { + MethodName: "Allocate", + Handler: _ResourcePlugin_Allocate_Handler, + }, + { + MethodName: "PreStartContainer", + Handler: _ResourcePlugin_PreStartContainer_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *ResourcePluginOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePluginOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePluginOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NeedReconcile { + i-- + if m.NeedReconcile { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.WithTopologyAlignment { + i-- + if m.WithTopologyAlignment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.PreStartRequired { + i-- + if m.PreStartRequired { + dAtA[i] = 1 + 
} else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RegisterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegisterRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RegisterRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x1a + } + if len(m.Endpoint) > 0 { + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) + i = encodeVarintApi(dAtA, i, uint64(len(m.Endpoint))) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i 
-= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ResourceRequests) > 0 { + for k := range m.ResourceRequests { + v := m.ResourceRequests[k] + baseI := i + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) + i-- + dAtA[i] = 0x11 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.Hint != nil { + { + size, err := m.Hint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x4a + } + if len(m.PodType) > 0 { + i -= len(m.PodType) + copy(dAtA[i:], m.PodType) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodType))) + i-- + dAtA[i] = 0x42 + } + if len(m.PodRole) > 0 { + i -= len(m.PodRole) + copy(dAtA[i:], m.PodRole) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodRole))) + i-- + dAtA[i] = 0x3a + } + if m.ContainerIndex != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerIndex)) + i-- + dAtA[i] = 0x30 + } + if m.ContainerType != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerType)) + i-- + dAtA[i] = 0x28 + } + if len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = 
encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceHintsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHintsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHintsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResourceHints) > 0 { + for k := range m.ResourceHints { + v := m.ResourceHints[k] + baseI := i + if v != nil { + { + size, err 
:= v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x4a + } + if len(m.PodType) > 0 { + i -= len(m.PodType) + copy(dAtA[i:], m.PodType) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodType))) + i-- + dAtA[i] = 0x42 + } + if len(m.PodRole) > 0 { + i -= len(m.PodRole) + copy(dAtA[i:], m.PodRole) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodRole))) + i-- + dAtA[i] = 0x3a + } + if m.ContainerIndex != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerIndex)) + i-- + dAtA[i] = 0x30 + } + if m.ContainerType != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerType)) + i-- + dAtA[i] = 0x28 + } + if len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceAllocationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *ResourceAllocationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAllocationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.AllocationResult != nil { + { + size, err := m.AllocationResult.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x4a + } + if len(m.PodType) > 0 { + i -= len(m.PodType) + copy(dAtA[i:], m.PodType) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodType))) + i-- + dAtA[i] = 0x42 + } + if len(m.PodRole) > 0 { + i -= len(m.PodRole) + copy(dAtA[i:], m.PodRole) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodRole))) + i-- + dAtA[i] = 0x3a + } + if m.ContainerIndex != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerIndex)) + i-- + dAtA[i] = 0x30 + } + if m.ContainerType != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerType)) + i-- + dAtA[i] = 0x28 + } + if 
len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListOfTopologyHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOfTopologyHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOfTopologyHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hints) > 0 { + for iNdEx := len(m.Hints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Hints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TopologyHint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyHint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyHint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Preferred { + i-- + 
if m.Preferred { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Nodes) > 0 { + dAtA6 := make([]byte, len(m.Nodes)*10) + var j5 int + for _, num := range m.Nodes { + for num >= 1<<7 { + dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j5++ + } + dAtA6[j5] = uint8(num) + j5++ + } + i -= j5 + copy(dAtA[i:], dAtA6[:j5]) + i = encodeVarintApi(dAtA, i, uint64(j5)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RemovePodRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemovePodRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemovePodRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemovePodResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemovePodResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*RemovePodResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *GetResourcesAllocationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetResourcesAllocationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetResourcesAllocationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *GetResourcesAllocationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetResourcesAllocationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetResourcesAllocationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodResources) > 0 { + for k := range m.PodResources { + v := m.PodResources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ContainerResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ContainerResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerResources) > 0 { + for k := range m.ContainerResources { + v := m.ContainerResources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceAllocation) > 0 { + for k := range m.ResourceAllocation { + v := m.ResourceAllocation[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceAllocationInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *ResourceAllocationInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAllocationInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResourceHints != nil { + { + size, err := m.ResourceHints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Envs) > 0 { + for k := range m.Envs { + v := m.Envs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.AllocationResult) > 0 { + i -= len(m.AllocationResult) + copy(dAtA[i:], m.AllocationResult) + i = encodeVarintApi(dAtA, i, uint64(len(m.AllocationResult))) + i-- + dAtA[i] = 0x2a + } + if m.AllocatedQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AllocatedQuantity)))) + i-- + dAtA[i] = 0x21 + } + if m.IsScalarResource { + i-- + if m.IsScalarResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IsNodeResource { + i-- + if m.IsNodeResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.OciPropertyName) > 0 { + i -= len(m.OciPropertyName) + copy(dAtA[i:], m.OciPropertyName) 
+ i = encodeVarintApi(dAtA, i, uint64(len(m.OciPropertyName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTopologyAwareResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTopologyAwareResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTopologyAwareResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTopologyAwareResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTopologyAwareResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTopologyAwareResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ContainerTopologyAwareResources != nil { + { + size, err := m.ContainerTopologyAwareResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x1a + } + if 
len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerTopologyAwareResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerTopologyAwareResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerTopologyAwareResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllocatedResources) > 0 { + for k := range m.AllocatedResources { + v := m.AllocatedResources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TopologyAwareResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyAwareResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyAwareResource) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OriginalTopologyAwareQuantityList) > 0 { + for iNdEx := len(m.OriginalTopologyAwareQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OriginalTopologyAwareQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.TopologyAwareQuantityList) > 0 { + for iNdEx := len(m.TopologyAwareQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.OriginalAggregatedQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.OriginalAggregatedQuantity)))) + i-- + dAtA[i] = 0x21 + } + if m.AggregatedQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AggregatedQuantity)))) + i-- + dAtA[i] = 0x19 + } + if m.IsScalarResource { + i-- + if m.IsScalarResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.IsNodeResource { + i-- + if m.IsNodeResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TopologyAwareQuantity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologyAwareQuantity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologyAwareQuantity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Node != 0 { + i = encodeVarintApi(dAtA, i, 
uint64(m.Node)) + i-- + dAtA[i] = 0x10 + } + if m.ResourceValue != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ResourceValue)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *GetTopologyAwareAllocatableResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTopologyAwareAllocatableResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTopologyAwareAllocatableResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *GetTopologyAwareAllocatableResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTopologyAwareAllocatableResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTopologyAwareAllocatableResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllocatableResources) > 0 { + for k := range m.AllocatableResources { + v := m.AllocatableResources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AllocatableTopologyAwareResource) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatableTopologyAwareResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatableTopologyAwareResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TopologyAwareCapacityQuantityList) > 0 { + for iNdEx := len(m.TopologyAwareCapacityQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareCapacityQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.AggregatedCapacityQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AggregatedCapacityQuantity)))) + i-- + dAtA[i] = 0x29 + } + if len(m.TopologyAwareAllocatableQuantityList) > 0 { + for iNdEx := len(m.TopologyAwareAllocatableQuantityList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologyAwareAllocatableQuantityList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.AggregatedAllocatableQuantity != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AggregatedAllocatableQuantity)))) + i-- + dAtA[i] = 0x19 + } + if m.IsScalarResource { + i-- + if m.IsScalarResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.IsNodeResource { + i-- + if m.IsNodeResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PreStartContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreStartContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreStartContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerName) > 0 { + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.PodNamespace) > 0 { + i -= len(m.PodNamespace) + copy(dAtA[i:], m.PodNamespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodNamespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.PodUid) > 0 { + i -= len(m.PodUid) + copy(dAtA[i:], m.PodUid) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodUid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PreStartContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreStartContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreStartContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourcePluginOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PreStartRequired { + n += 2 
+ } + if m.WithTopologyAlignment { + n += 2 + } + if m.NeedReconcile { + n += 2 + } + return n +} + +func (m *RegisterRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Endpoint) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ContainerType != 0 { + n += 1 + sovApi(uint64(m.ContainerType)) + } + if m.ContainerIndex != 0 { + n += 1 + sovApi(uint64(m.ContainerIndex)) + } + l = len(m.PodRole) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodType) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Hint != nil { + l = m.Hint.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.ResourceRequests) > 0 { + for k, v := range m.ResourceRequests { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + 8 + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + 
sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceHintsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ContainerType != 0 { + n += 1 + sovApi(uint64(m.ContainerType)) + } + if m.ContainerIndex != 0 { + n += 1 + sovApi(uint64(m.ContainerIndex)) + } + l = len(m.PodRole) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodType) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.ResourceHints) > 0 { + for k, v := range m.ResourceHints { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceAllocationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ContainerType 
!= 0 { + n += 1 + sovApi(uint64(m.ContainerType)) + } + if m.ContainerIndex != 0 { + n += 1 + sovApi(uint64(m.ContainerIndex)) + } + l = len(m.PodRole) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodType) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.AllocationResult != nil { + l = m.AllocationResult.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListOfTopologyHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hints) > 0 { + for _, e := range m.Hints { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *TopologyHint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nodes) > 0 { + l = 0 + for _, e := range m.Nodes { + l += sovApi(uint64(e)) + } + n += 1 + sovApi(uint64(l)) + l + } + if m.Preferred { + n += 2 + } + return n +} + +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RemovePodRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RemovePodResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *GetResourcesAllocationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *GetResourcesAllocationResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodResources) > 0 { + for k, v := range m.PodResources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ContainerResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ContainerResources) > 0 { + for k, v := range m.ContainerResources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceAllocation) > 0 { + for k, v := range m.ResourceAllocation { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceAllocationInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OciPropertyName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.IsNodeResource { + n += 2 + } + if m.IsScalarResource { + n += 2 + } + if m.AllocatedQuantity != 0 { + n += 9 + } + l = len(m.AllocationResult) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Envs) > 0 { + for k, v := range m.Envs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } 
+ if m.ResourceHints != nil { + l = m.ResourceHints.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *GetTopologyAwareResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *GetTopologyAwareResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ContainerTopologyAwareResources != nil { + l = m.ContainerTopologyAwareResources.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ContainerTopologyAwareResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TopologyAwareResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IsNodeResource { + n += 2 + } + if m.IsScalarResource { + n += 2 + } + if m.AggregatedQuantity != 0 { + n += 9 + } + if m.OriginalAggregatedQuantity != 0 { + n += 9 + } + if len(m.TopologyAwareQuantityList) > 0 { + for _, e := range m.TopologyAwareQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.OriginalTopologyAwareQuantityList) > 0 { + for _, e := range m.OriginalTopologyAwareQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *TopologyAwareQuantity) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceValue != 0 { + n += 9 + } + if m.Node != 0 { + n += 1 + sovApi(uint64(m.Node)) + } + return n +} + +func (m *GetTopologyAwareAllocatableResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *GetTopologyAwareAllocatableResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllocatableResources) > 0 { + for k, v := range m.AllocatableResources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *AllocatableTopologyAwareResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IsNodeResource { + n += 2 + } + if m.IsScalarResource { + n += 2 + } + if m.AggregatedAllocatableQuantity != 0 { + n += 9 + } + if len(m.TopologyAwareAllocatableQuantityList) > 0 { + for _, e := range m.TopologyAwareAllocatableQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.AggregatedCapacityQuantity != 0 { + n += 9 + } + if len(m.TopologyAwareCapacityQuantityList) > 0 { + for _, e := range m.TopologyAwareCapacityQuantityList { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *PreStartContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodUid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodNamespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ContainerName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PreStartContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovApi(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ResourcePluginOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePluginOptions{`, + `PreStartRequired:` + fmt.Sprintf("%v", this.PreStartRequired) + `,`, + `WithTopologyAlignment:` + fmt.Sprintf("%v", this.WithTopologyAlignment) + `,`, + `NeedReconcile:` + fmt.Sprintf("%v", this.NeedReconcile) + `,`, + `}`, + }, "") + return s +} +func (this *RegisterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegisterRequest{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `Options:` + strings.Replace(this.Options.String(), "ResourcePluginOptions", "ResourcePluginOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequest) String() string { + if this == nil { + return "nil" + } + keysForResourceRequests := make([]string, 0, len(this.ResourceRequests)) + for k := range this.ResourceRequests { + keysForResourceRequests = append(keysForResourceRequests, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourceRequests) + mapStringForResourceRequests := "map[string]float64{" + for _, k := range keysForResourceRequests { + mapStringForResourceRequests += fmt.Sprintf("%v: %v,", k, this.ResourceRequests[k]) + } + mapStringForResourceRequests += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := 
range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ResourceRequest{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `ContainerType:` + fmt.Sprintf("%v", this.ContainerType) + `,`, + `ContainerIndex:` + fmt.Sprintf("%v", this.ContainerIndex) + `,`, + `PodRole:` + fmt.Sprintf("%v", this.PodRole) + `,`, + `PodType:` + fmt.Sprintf("%v", this.PodType) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `Hint:` + strings.Replace(this.Hint.String(), "TopologyHint", "TopologyHint", 1) + `,`, + `ResourceRequests:` + mapStringForResourceRequests + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *ResourceHintsResponse) String() string { + if this == nil { + return "nil" + } + keysForResourceHints := make([]string, 0, len(this.ResourceHints)) + for k := range this.ResourceHints { + keysForResourceHints = append(keysForResourceHints, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourceHints) + mapStringForResourceHints := "map[string]*ListOfTopologyHints{" + for _, k := range keysForResourceHints { + mapStringForResourceHints += fmt.Sprintf("%v: %v,", k, this.ResourceHints[k]) + } + mapStringForResourceHints += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := 
"map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ResourceHintsResponse{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `ContainerType:` + fmt.Sprintf("%v", this.ContainerType) + `,`, + `ContainerIndex:` + fmt.Sprintf("%v", this.ContainerIndex) + `,`, + `PodRole:` + fmt.Sprintf("%v", this.PodRole) + `,`, + `PodType:` + fmt.Sprintf("%v", this.PodType) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `ResourceHints:` + mapStringForResourceHints + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAllocationResponse) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + 
// NOTE(review): Everything below is gogo-protobuf GENERATED code (protoc-gen-gogo style:
// `dAtA` buffers, `skipApi`/`ErrIntOverflowApi` varint loops, `github_com_gogo_protobuf_sortkeys`
// for deterministic map-key ordering in String()). It implements the String() debug stringers and
// the wire-format Unmarshal decoders for the QoS Resource Manager plugin API messages
// (ResourceAllocationResponse, TopologyHint, ResourceRequest, RegisterRequest, ...).
// Do NOT hand-edit: regenerate from the .proto definition instead, or any manual change will be
// lost on the next codegen run and may silently diverge from the proto schema.
// NOTE(review): the text of this chunk appears to be a unified-diff fragment whose leading "+"
// line markers were fused inline and whose newlines were collapsed — it is preserved verbatim
// here; the surrounding functions at both edges are truncated by the chunk boundary.
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ResourceAllocationResponse{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `ContainerType:` + fmt.Sprintf("%v", this.ContainerType) + `,`, + `ContainerIndex:` + fmt.Sprintf("%v", this.ContainerIndex) + `,`, + `PodRole:` + fmt.Sprintf("%v", this.PodRole) + `,`, + `PodType:` + fmt.Sprintf("%v", this.PodType) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `AllocationResult:` + strings.Replace(this.AllocationResult.String(), "ResourceAllocation", "ResourceAllocation", 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *ListOfTopologyHints) String() string { + if this == nil { + return "nil" + } + repeatedStringForHints := "[]*TopologyHint{" + for _, f := range this.Hints { + repeatedStringForHints += strings.Replace(f.String(), "TopologyHint", "TopologyHint", 1) + "," + } + repeatedStringForHints += "}" + s := strings.Join([]string{`&ListOfTopologyHints{`, + `Hints:` + repeatedStringForHints + `,`, + `}`, + }, "") + return s +} +func (this *TopologyHint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologyHint{`, + `Nodes:` + fmt.Sprintf("%v", this.Nodes) + `,`, + `Preferred:` + fmt.Sprintf("%v", this.Preferred) + `,`, + `}`, + }, "") + return s +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `}`, + }, "") + return s +} +func (this *RemovePodRequest) String() 
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemovePodRequest{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `}`, + }, "") + return s +} +func (this *RemovePodResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemovePodResponse{`, + `}`, + }, "") + return s +} +func (this *GetResourcesAllocationRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetResourcesAllocationRequest{`, + `}`, + }, "") + return s +} +func (this *GetResourcesAllocationResponse) String() string { + if this == nil { + return "nil" + } + keysForPodResources := make([]string, 0, len(this.PodResources)) + for k := range this.PodResources { + keysForPodResources = append(keysForPodResources, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodResources) + mapStringForPodResources := "map[string]*ContainerResources{" + for _, k := range keysForPodResources { + mapStringForPodResources += fmt.Sprintf("%v: %v,", k, this.PodResources[k]) + } + mapStringForPodResources += "}" + s := strings.Join([]string{`&GetResourcesAllocationResponse{`, + `PodResources:` + mapStringForPodResources + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResources) String() string { + if this == nil { + return "nil" + } + keysForContainerResources := make([]string, 0, len(this.ContainerResources)) + for k := range this.ContainerResources { + keysForContainerResources = append(keysForContainerResources, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForContainerResources) + mapStringForContainerResources := "map[string]*ResourceAllocation{" + for _, k := range keysForContainerResources { + mapStringForContainerResources += fmt.Sprintf("%v: %v,", k, this.ContainerResources[k]) + } + mapStringForContainerResources += "}" + s := strings.Join([]string{`&ContainerResources{`, + `ContainerResources:` + mapStringForContainerResources + `,`, + `}`, + }, "") + return s +} +func 
(this *ResourceAllocation) String() string { + if this == nil { + return "nil" + } + keysForResourceAllocation := make([]string, 0, len(this.ResourceAllocation)) + for k := range this.ResourceAllocation { + keysForResourceAllocation = append(keysForResourceAllocation, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourceAllocation) + mapStringForResourceAllocation := "map[string]*ResourceAllocationInfo{" + for _, k := range keysForResourceAllocation { + mapStringForResourceAllocation += fmt.Sprintf("%v: %v,", k, this.ResourceAllocation[k]) + } + mapStringForResourceAllocation += "}" + s := strings.Join([]string{`&ResourceAllocation{`, + `ResourceAllocation:` + mapStringForResourceAllocation + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAllocationInfo) String() string { + if this == nil { + return "nil" + } + keysForEnvs := make([]string, 0, len(this.Envs)) + for k := range this.Envs { + keysForEnvs = append(keysForEnvs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEnvs) + mapStringForEnvs := "map[string]string{" + for _, k := range keysForEnvs { + mapStringForEnvs += fmt.Sprintf("%v: %v,", k, this.Envs[k]) + } + mapStringForEnvs += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ResourceAllocationInfo{`, + `OciPropertyName:` + fmt.Sprintf("%v", this.OciPropertyName) + `,`, + `IsNodeResource:` + fmt.Sprintf("%v", this.IsNodeResource) + `,`, + `IsScalarResource:` + fmt.Sprintf("%v", this.IsScalarResource) + `,`, + `AllocatedQuantity:` + fmt.Sprintf("%v", this.AllocatedQuantity) + `,`, + `AllocationResult:` + 
fmt.Sprintf("%v", this.AllocationResult) + `,`, + `Envs:` + mapStringForEnvs + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `ResourceHints:` + strings.Replace(this.ResourceHints.String(), "ListOfTopologyHints", "ListOfTopologyHints", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetTopologyAwareResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTopologyAwareResourcesRequest{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *GetTopologyAwareResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTopologyAwareResourcesResponse{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `ContainerTopologyAwareResources:` + strings.Replace(this.ContainerTopologyAwareResources.String(), "ContainerTopologyAwareResources", "ContainerTopologyAwareResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerTopologyAwareResources) String() string { + if this == nil { + return "nil" + } + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "map[string]*TopologyAwareResource{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[k]) + } + mapStringForAllocatedResources += "}" + s := strings.Join([]string{`&ContainerTopologyAwareResources{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `}`, + }, "") + return 
s +} +func (this *TopologyAwareResource) String() string { + if this == nil { + return "nil" + } + repeatedStringForTopologyAwareQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareQuantityList { + repeatedStringForTopologyAwareQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareQuantityList += "}" + repeatedStringForOriginalTopologyAwareQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.OriginalTopologyAwareQuantityList { + repeatedStringForOriginalTopologyAwareQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForOriginalTopologyAwareQuantityList += "}" + s := strings.Join([]string{`&TopologyAwareResource{`, + `IsNodeResource:` + fmt.Sprintf("%v", this.IsNodeResource) + `,`, + `IsScalarResource:` + fmt.Sprintf("%v", this.IsScalarResource) + `,`, + `AggregatedQuantity:` + fmt.Sprintf("%v", this.AggregatedQuantity) + `,`, + `OriginalAggregatedQuantity:` + fmt.Sprintf("%v", this.OriginalAggregatedQuantity) + `,`, + `TopologyAwareQuantityList:` + repeatedStringForTopologyAwareQuantityList + `,`, + `OriginalTopologyAwareQuantityList:` + repeatedStringForOriginalTopologyAwareQuantityList + `,`, + `}`, + }, "") + return s +} +func (this *TopologyAwareQuantity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologyAwareQuantity{`, + `ResourceValue:` + fmt.Sprintf("%v", this.ResourceValue) + `,`, + `Node:` + fmt.Sprintf("%v", this.Node) + `,`, + `}`, + }, "") + return s +} +func (this *GetTopologyAwareAllocatableResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTopologyAwareAllocatableResourcesRequest{`, + `}`, + }, "") + return s +} +func (this *GetTopologyAwareAllocatableResourcesResponse) String() string { + if this == nil { + return "nil" + } + 
keysForAllocatableResources := make([]string, 0, len(this.AllocatableResources)) + for k := range this.AllocatableResources { + keysForAllocatableResources = append(keysForAllocatableResources, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatableResources) + mapStringForAllocatableResources := "map[string]*AllocatableTopologyAwareResource{" + for _, k := range keysForAllocatableResources { + mapStringForAllocatableResources += fmt.Sprintf("%v: %v,", k, this.AllocatableResources[k]) + } + mapStringForAllocatableResources += "}" + s := strings.Join([]string{`&GetTopologyAwareAllocatableResourcesResponse{`, + `AllocatableResources:` + mapStringForAllocatableResources + `,`, + `}`, + }, "") + return s +} +func (this *AllocatableTopologyAwareResource) String() string { + if this == nil { + return "nil" + } + repeatedStringForTopologyAwareAllocatableQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareAllocatableQuantityList { + repeatedStringForTopologyAwareAllocatableQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareAllocatableQuantityList += "}" + repeatedStringForTopologyAwareCapacityQuantityList := "[]*TopologyAwareQuantity{" + for _, f := range this.TopologyAwareCapacityQuantityList { + repeatedStringForTopologyAwareCapacityQuantityList += strings.Replace(f.String(), "TopologyAwareQuantity", "TopologyAwareQuantity", 1) + "," + } + repeatedStringForTopologyAwareCapacityQuantityList += "}" + s := strings.Join([]string{`&AllocatableTopologyAwareResource{`, + `IsNodeResource:` + fmt.Sprintf("%v", this.IsNodeResource) + `,`, + `IsScalarResource:` + fmt.Sprintf("%v", this.IsScalarResource) + `,`, + `AggregatedAllocatableQuantity:` + fmt.Sprintf("%v", this.AggregatedAllocatableQuantity) + `,`, + `TopologyAwareAllocatableQuantityList:` + repeatedStringForTopologyAwareAllocatableQuantityList + `,`, + `AggregatedCapacityQuantity:` + 
fmt.Sprintf("%v", this.AggregatedCapacityQuantity) + `,`, + `TopologyAwareCapacityQuantityList:` + repeatedStringForTopologyAwareCapacityQuantityList + `,`, + `}`, + }, "") + return s +} +func (this *PreStartContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreStartContainerRequest{`, + `PodUid:` + fmt.Sprintf("%v", this.PodUid) + `,`, + `PodNamespace:` + fmt.Sprintf("%v", this.PodNamespace) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *PreStartContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreStartContainerResponse{`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ResourcePluginOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePluginOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePluginOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStartRequired", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PreStartRequired = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WithTopologyAlignment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WithTopologyAlignment = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NeedReconcile", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NeedReconcile = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegisterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegisterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegisterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ResourcePluginOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerType", wireType) + } + m.ContainerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerType |= ContainerType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerIndex", wireType) + } + m.ContainerIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodRole", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodRole = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PodType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hint == nil { + m.Hint = &TopologyHint{} + } + if err := m.Hint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceRequests == nil { + m.ResourceRequests = make(map[string]float64) + } + var mapkey string + var mapvalue float64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + mapvaluetemp = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + mapvalue = math.Float64frombits(mapvaluetemp) + } else { + iNdEx = 
entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResourceRequests[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var 
stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum 
== 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
+ return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceHintsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHintsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHintsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerType", wireType) + } + m.ContainerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerType |= ContainerType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerIndex", wireType) + } + m.ContainerIndex 
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodRole", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodRole = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceHints == nil { + m.ResourceHints = make(map[string]*ListOfTopologyHints) + } + var mapkey string + var mapvalue *ListOfTopologyHints + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ListOfTopologyHints{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResourceHints[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var 
stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAllocationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAllocationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAllocationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerType", wireType) + } + m.ContainerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerType |= ContainerType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerIndex", wireType) + } + m.ContainerIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodRole", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodRole = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationResult", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocationResult == nil { + m.AllocationResult = &ResourceAllocation{} + } + if err := m.AllocationResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if 
postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if 
postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOfTopologyHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: ListOfTopologyHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOfTopologyHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hints = append(m.Hints, &TopologyHint{}) + if err := m.Hints[len(m.Hints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologyHint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologyHint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologyHint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
+ if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Nodes) == 0 { + m.Nodes = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Preferred", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Preferred = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemovePodRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemovePodRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemovePodRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + 
} + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemovePodResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemovePodResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemovePodResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *GetResourcesAllocationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResourcesAllocationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResourcesAllocationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResourcesAllocationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResourcesAllocationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResourcesAllocationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodResources == nil { + m.PodResources = make(map[string]*ContainerResources) + } + var mapkey string + var mapvalue *ContainerResources + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return 
io.ErrUnexpectedEOF + } + mapvalue = &ContainerResources{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodResources[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResources == nil { + m.ContainerResources = make(map[string]*ResourceAllocation) + } + var mapkey string + var mapvalue *ResourceAllocation + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ResourceAllocation{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ContainerResources[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAllocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAllocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAllocation == nil { + m.ResourceAllocation = make(map[string]*ResourceAllocationInfo) + } + var mapkey string + var mapvalue 
*ResourceAllocationInfo + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ResourceAllocationInfo{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResourceAllocation[mapkey] = mapvalue + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAllocationInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAllocationInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAllocationInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OciPropertyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OciPropertyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNodeResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNodeResource = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsScalarResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsScalarResource = bool(v != 0) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AllocatedQuantity = float64(math.Float64frombits(v)) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationResult", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationResult = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Envs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Envs == nil { + m.Envs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, 
err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Envs[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceHints == nil { + m.ResourceHints = &ListOfTopologyHints{} + } + if err := m.ResourceHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy 
+ } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTopologyAwareResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTopologyAwareResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTopologyAwareResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTopologyAwareResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTopologyAwareResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTopologyAwareResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTopologyAwareResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerTopologyAwareResources == nil { + m.ContainerTopologyAwareResources = 
&ContainerTopologyAwareResources{} + } + if err := m.ContainerTopologyAwareResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerTopologyAwareResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerTopologyAwareResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerTopologyAwareResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(map[string]*TopologyAwareResource) + } + var mapkey string + var mapvalue *TopologyAwareResource + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 
{ + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &TopologyAwareResource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResources[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologyAwareResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologyAwareResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologyAwareResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNodeResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNodeResource = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsScalarResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsScalarResource = bool(v != 0) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AggregatedQuantity = float64(math.Float64frombits(v)) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalAggregatedQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.OriginalAggregatedQuantity = float64(math.Float64frombits(v)) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyAwareQuantityList = append(m.TopologyAwareQuantityList, &TopologyAwareQuantity{}) + if err := m.TopologyAwareQuantityList[len(m.TopologyAwareQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
+ iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalTopologyAwareQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalTopologyAwareQuantityList = append(m.OriginalTopologyAwareQuantityList, &TopologyAwareQuantity{}) + if err := m.OriginalTopologyAwareQuantityList[len(m.OriginalTopologyAwareQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologyAwareQuantity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologyAwareQuantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologyAwareQuantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ResourceValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ResourceValue = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + m.Node = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Node |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTopologyAwareAllocatableResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTopologyAwareAllocatableResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTopologyAwareAllocatableResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTopologyAwareAllocatableResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTopologyAwareAllocatableResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTopologyAwareAllocatableResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatableResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatableResources == nil { + m.AllocatableResources = make(map[string]*AllocatableTopologyAwareResource) + } + var mapkey string + var mapvalue *AllocatableTopologyAwareResource + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &AllocatableTopologyAwareResource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatableResources[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocatableTopologyAwareResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatableTopologyAwareResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatableTopologyAwareResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNodeResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNodeResource = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsScalarResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsScalarResource = bool(v != 0) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedAllocatableQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AggregatedAllocatableQuantity = float64(math.Float64frombits(v)) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field TopologyAwareAllocatableQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyAwareAllocatableQuantityList = append(m.TopologyAwareAllocatableQuantityList, &TopologyAwareQuantity{}) + if err := m.TopologyAwareAllocatableQuantityList[len(m.TopologyAwareAllocatableQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedCapacityQuantity", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.AggregatedCapacityQuantity = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyAwareCapacityQuantityList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyAwareCapacityQuantityList = append(m.TopologyAwareCapacityQuantityList, &TopologyAwareQuantity{}) + if err := m.TopologyAwareCapacityQuantityList[len(m.TopologyAwareCapacityQuantityList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreStartContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreStartContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreStartContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + 
return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreStartContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreStartContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreStartContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/api.proto b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/api.proto new file mode 100644 index 0000000000000..0ad35ab226c6d --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/api.proto @@ -0,0 +1,248 @@ +// To regenerate api.pb.go run hack/update-generated-resource-plugin.sh +syntax = 'proto3'; + +package resourceplugin.v1alpha1; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +option go_package = "v1alpha1"; + + +// Registration is the service advertised by the Kubelet +// Only when Kubelet answers with a success code to a Register Request +// may Resource Plugins start their service +// Registration may fail when resource plugin version is not supported by +// Kubelet or the registered 
resourceName is already taken by another +// active resource plugin. Resource plugin is expected to terminate upon registration failure +service Registration { + rpc Register(RegisterRequest) returns (Empty) {} +} + +message ResourcePluginOptions { + // Indicates if PreStartContainer call is required before each container start + bool pre_start_required = 1; + // Indicates if the resource this plugin managed needs topology alignment + bool with_topology_alignment = 2; + // Indicates if the resource needs reconciling allocation result + bool need_reconcile = 3; +} + +message RegisterRequest { + // Version of the API the Resource Plugin was built against + string version = 1; + // Name of the unix socket the resource plugin is listening on + // PATH = path.Join(ResourcePluginPath, endpoint) + string endpoint = 2; + // Schedulable resource name. As of now it's expected to be a DNS Label + string resource_name = 3; + // Options to be communicated with Resource Manager + ResourcePluginOptions options = 4; +} + +enum ContainerType { + INIT = 0; + MAIN = 1; + SIDECAR = 2; + EPHEMERAL = 3; +} + +message ResourceRequest { + string pod_uid = 1; + string pod_namespace = 2; + string pod_name = 3; + string container_name = 4; + ContainerType container_type = 5; + uint64 container_index = 6; + string pod_role = 7; + string pod_type = 8; + string resource_name = 9; + TopologyHint hint = 10; + map resource_requests = 11; + map labels = 12; + map annotations = 13; +} + +message ResourceHintsResponse { + string pod_uid = 1; + string pod_namespace = 2; + string pod_name = 3; + string container_name = 4; + ContainerType container_type = 5; + uint64 container_index = 6; + string pod_role = 7; + string pod_type = 8; + string resource_name = 9; + map resource_hints = 10; + map labels = 11; + map annotations = 12; +} + +message ResourceAllocationResponse { + string pod_uid = 1; + string pod_namespace = 2; + string pod_name = 3; + string container_name = 4; + ContainerType container_type = 
5; + uint64 container_index = 6; + string pod_role = 7; + string pod_type = 8; + string resource_name = 9; + ResourceAllocation allocation_result = 10; + map labels = 11; + map annotations = 12; +} + +message ListOfTopologyHints { + repeated TopologyHint hints = 1; +} + +message TopologyHint { + repeated uint64 nodes = 1; + bool preferred = 2; +} + +message Empty { +} + +message RemovePodRequest { + string pod_uid = 1; +} + +message RemovePodResponse { +} + +message GetResourcesAllocationRequest { +} + +message GetResourcesAllocationResponse { + map pod_resources = 1; +} + +message ContainerResources { + map container_resources = 1; +} + +message ResourceAllocation { + map resource_allocation = 1; +} + +message ResourceAllocationInfo { + string oci_property_name = 1; + bool is_node_resource = 2; + bool is_scalar_resource = 3; + // only for exclusive resources + double allocated_quantity = 4; + string allocation_result = 5; + map envs = 6; + map annotations = 7; + // topology hints corresponds to allocation information. + // we need it when kubelet restarts and resurces had been allocated. + // - why don't we use GetTopologyAwareResources of qrm to generate hints? + // - for those resources with accompanying resources, + // we can't generate hints of its accompanying resource by its allocation_result. 
+ ListOfTopologyHints resource_hints = 8; +} + +message GetTopologyAwareResourcesRequest { + string pod_uid = 1; + string container_name = 2; +} + +message GetTopologyAwareResourcesResponse { + string pod_uid = 1; + string pod_name = 2; + string pod_namespace = 3; + ContainerTopologyAwareResources container_topology_aware_resources = 4; +} + +message ContainerTopologyAwareResources { + string container_name = 1; + map allocated_resources = 2; +} + +message TopologyAwareResource { + bool is_node_resource = 1; + bool is_scalar_resource = 2; + double aggregated_quantity = 3; + double original_aggregated_quantity = 4; + repeated TopologyAwareQuantity topology_aware_quantity_list = 5; + repeated TopologyAwareQuantity original_topology_aware_quantity_list = 6; +} + +message TopologyAwareQuantity { + double resource_value = 1; + uint64 node = 2; +} + +message GetTopologyAwareAllocatableResourcesRequest { +} + + +message GetTopologyAwareAllocatableResourcesResponse { + map allocatable_resources = 1; +} + +message AllocatableTopologyAwareResource { + bool is_node_resource = 1; + bool is_scalar_resource = 2; + double aggregated_allocatable_quantity = 3; + repeated TopologyAwareQuantity topology_aware_allocatable_quantity_list = 4; + double aggregated_capacity_quantity = 5; + repeated TopologyAwareQuantity topology_aware_capacity_quantity_list = 6; +} + +// - PreStartContainer is expected to be called before each container start if indicated by plugin during registration phase. +// - PreStartContainer allows kubelet to pass reinitialized resources to containers. 
+// - PreStartContainer allows Resource Plugin to run resource specific operations on +// the resources requested +message PreStartContainerRequest { + string pod_uid = 1; + string pod_namespace = 2; + string pod_name = 3; + string container_name = 4; +} + +// PreStartContainerResponse will be send by plugin in response to PreStartContainerRequest +message PreStartContainerResponse { +} + +// ResourcePlugin is the service advertised by Resource Plugins +service ResourcePlugin { + // GetTopologyHints returns hints of corresponding resources + rpc GetTopologyHints(ResourceRequest) returns (ResourceHintsResponse) {} + + // Notify the resource plugin that the pod has beed deleted, + // and the plugin should do some clear-up work. + rpc RemovePod(RemovePodRequest) returns (RemovePodResponse) {} + + // GetResourcesAllocation returns allocation results of corresponding resources + rpc GetResourcesAllocation(GetResourcesAllocationRequest) returns (GetResourcesAllocationResponse) {} + + // GetTopologyAwareResources returns allocation results of corresponding resources as topology aware format + rpc GetTopologyAwareResources(GetTopologyAwareResourcesRequest) returns (GetTopologyAwareResourcesResponse) {} + + // GetTopologyAwareResources returns corresponding allocatable resources as topology aware format + rpc GetTopologyAwareAllocatableResources(GetTopologyAwareAllocatableResourcesRequest) returns (GetTopologyAwareAllocatableResourcesResponse) {} + + // GetResourcePluginOptions returns options to be communicated with Resource + // Manager + rpc GetResourcePluginOptions(Empty) returns (ResourcePluginOptions) {} + + // Allocate is called during pod admit so that the resource + // plugin can allocate corresponding resource for the container + // according to resource request + rpc Allocate(ResourceRequest) returns (ResourceAllocationResponse) {} + + // PreStartContainer is called, if indicated by resource plugin during registeration phase, + // before each container start. 
Resource plugin can run resource specific operations + // such as resetting the resource before making resources available to the container + rpc PreStartContainer(PreStartContainerRequest) returns (PreStartContainerResponse) {} +} diff --git a/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/constants.go b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/constants.go new file mode 100644 index 0000000000000..99f2f8a83f37c --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1/constants.go @@ -0,0 +1,80 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/util/sets" +) + +const ( + // Healthy means that the resource is healthy + Healthy = "Healthy" + // UnHealthy means that the resource is unhealthy + Unhealthy = "Unhealthy" + + // Current version of the API supported by kubelet + Version = "v1alpha1" + // ResourcePluginPath is the folder the Resource Plugin is expecting sockets to be on + // Only privileged pods have access to this path + // Note: Placeholder until we find a "standard path" + ResourcePluginPath = "/var/lib/kubelet/resource-plugins/" + // KubeletSocket is the path of the Kubelet registry socket + KubeletSocket = ResourcePluginPath + "kubelet.sock" + // Timeout duration in secs for PreStartContainer RPC + KubeletResourcePluginPreStartContainerRPCTimeoutInSecs = 30 + // Timeout duration in secs for Allocate RPC + KubeletResourcePluginAllocateRPCTimeoutInSecs = 10 + // Timeout duration in secs for GetTopologyHints RPC + KubeletResourcePluginGetTopologyHintsRPCTimeoutInSecs = 10 + // Timeout duration in secs for RemovePod RPC + KubeletResourcePluginRemovePodRPCTimeoutInSecs = 10 + // Timeout duration in secs for GetResourcesAllocation RPC + KubeletResourcePluginGetResourcesAllocationRPCTimeoutInSecs = 10 + // Timeout duration in secs for GetTopologyAwareResources RPC + KubeletResourcePluginGetTopologyAwareResourcesRPCTimeoutInSecs = 10 + // Timeout duration in secs for GetTopologyAwareAllocatableResources RPC + KubeletResourcePluginGetTopologyAwareAllocatableResourcesRPCTimeoutInSecs = 10 + + PodRoleLabelKey = "katalyst.kubewharf.io/pod_role" + PodTypeAnnotationKey = "katalyst.kubewharf.io/pod_type" + + KatalystQoSLevelAnnotationKey = "katalyst.kubewharf.io/qos_level" + KatalystNumaBindingAnnotationKey = "katalyst.kubewharf.io/numa_binding" + KatalystSkipQRMAdmitAnnotationKey = "katalyst.kubewharf.io/skip_qrm_admit" + + KatalystQoSLevelLabelKey = KatalystQoSLevelAnnotationKey + + KatalystQoSLevelDedicatedCores = "dedicated_cores" + 
KatalystQoSLevelSharedCores = "shared_cores" + KatalystQoSLevelReclaimedCores = "reclaimed_cores" + KatalystQoSLevelSystemCores = "system_cores" + + KatalystValueTrue = "true" + + KatalystMemoryEnhancementAnnotationKey = "katalyst.kubewharf.io/memory_enhancement" + KatalystCPUEnhancementAnnotationKey = "katalyst.kubewharf.io/cpu_enhancement" + + KatalystMemoryEnhancementKeyNumaBinding = "numa_binding" +) + +var SupportedVersions = [...]string{"v1alpha1"} +var SupportedKatalystQoSLevels = sets.NewString( + KatalystQoSLevelDedicatedCores, + KatalystQoSLevelSharedCores, + KatalystQoSLevelReclaimedCores, + KatalystQoSLevelSystemCores, +) diff --git a/vendor/modules.txt b/vendor/modules.txt index 91a974f73dfa2..047239cee839b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2189,6 +2189,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 +k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1 # k8s.io/legacy-cloud-providers v0.0.0 => ./staging/src/k8s.io/legacy-cloud-providers ## explicit k8s.io/legacy-cloud-providers/aws