From 909ff35e1facc10618f7eed5876e217b9571262b Mon Sep 17 00:00:00 2001 From: Glyn Normington Date: Tue, 6 Nov 2018 17:39:13 +0000 Subject: [PATCH] Move image commands and distro CLI from riff to pfs * switch to counterfeiter for mock generation because of https://github.com/vektra/mockery/issues/213 * fix some tests which flake due to non-deterministic order of ranging over maps * update all the indirect requirements in go.mod which correspond to dependencies in riff Gopkg.lock to use the revision sha and run make to sanitise them * strip out Apache license headers Part of https://github.com/pivotal-cf/pfs/issues/44 Part of https://github.com/pivotal-cf/pfs/issues/45 --- fixtures/arg.yaml | 11 + fixtures/block.yaml | 154 +++++ fixtures/complex.yaml | 1156 +++++++++++++++++++++++++++++++++++ fixtures/invalid.yaml | 1 + fixtures/parameterized.yaml | 58 ++ fixtures/simple.yaml | 12 + fixtures/suffix.yaml | 8 + list.go | 131 ++++ list_test.go | 132 ++++ resource_suite_test.go | 13 + 10 files changed, 1676 insertions(+) create mode 100644 fixtures/arg.yaml create mode 100644 fixtures/block.yaml create mode 100644 fixtures/complex.yaml create mode 100644 fixtures/invalid.yaml create mode 100644 fixtures/parameterized.yaml create mode 100644 fixtures/simple.yaml create mode 100644 fixtures/suffix.yaml create mode 100644 list.go create mode 100644 list_test.go create mode 100644 resource_suite_test.go diff --git a/fixtures/arg.yaml b/fixtures/arg.yaml new file mode 100644 index 0000000..14c6213 --- /dev/null +++ b/fixtures/arg.yaml @@ -0,0 +1,11 @@ +spec: + template: + spec: + containers: + - args: + - -stderrthreshold + - INFO + - -creds-image + - gcr.io/knative-releases/a/b + - -git-image + - gcr.io/knative-releases/c/d diff --git a/fixtures/block.yaml b/fixtures/block.yaml new file mode 100644 index 0000000..2da9689 --- /dev/null +++ b/fixtures/block.yaml @@ -0,0 +1,154 @@ +# Source: istio/templates/sidecar-injector-configmap.yaml + +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: istio + chart: istio-1.0.1 + release: RELEASE-NAME + heritage: Tiller + istio: sidecar-injector +data: + config: |- + policy: disabled + template: |- + initContainers: + - name: istio-init + image: "docker.io/istio/proxy_init:1.0.1" + args: + - "-p" + - [[ .MeshConfig.ProxyListenPort ]] + - "-u" + - 1337 + - "-m" + - [[ or (index .ObjectMeta.Annotations "sidecar.istio.io/interceptionMode") .ProxyConfig.InterceptionMode.String ]] + - "-i" + [[ if (isset .ObjectMeta.Annotations "traffic.sidecar.istio.io/includeOutboundIPRanges") -]] + - "[[ index .ObjectMeta.Annotations "traffic.sidecar.istio.io/includeOutboundIPRanges" ]]" + [[ else -]] + - "*" + [[ end -]] + - "-x" + [[ if (isset .ObjectMeta.Annotations "traffic.sidecar.istio.io/excludeOutboundIPRanges") -]] + - "[[ index .ObjectMeta.Annotations "traffic.sidecar.istio.io/excludeOutboundIPRanges" ]]" + [[ else -]] + - "" + [[ end -]] + - "-b" + [[ if (isset .ObjectMeta.Annotations "traffic.sidecar.istio.io/includeInboundPorts") -]] + - "[[ index .ObjectMeta.Annotations "traffic.sidecar.istio.io/includeInboundPorts" ]]" + [[ else -]] + - [[ range .Spec.Containers -]][[ range .Ports -]][[ .ContainerPort -]], [[ end -]][[ end -]][[ end]] + - "-d" + [[ if (isset .ObjectMeta.Annotations "traffic.sidecar.istio.io/excludeInboundPorts") -]] + - "[[ index .ObjectMeta.Annotations "traffic.sidecar.istio.io/excludeInboundPorts" ]]" + [[ else -]] + - "" + [[ end -]] + imagePullPolicy: IfNotPresent + 
securityContext: + capabilities: + add: + - NET_ADMIN + restartPolicy: Always + + containers: + - name: istio-proxy + image: [[ if (isset .ObjectMeta.Annotations "sidecar.istio.io/proxyImage") -]] + "[[ index .ObjectMeta.Annotations "sidecar.istio.io/proxyImage" ]]" + [[ else -]] + docker.io/istio/proxyv2:1.0.1 + [[ end -]] + args: + - proxy + - sidecar + - --configPath + - [[ .ProxyConfig.ConfigPath ]] + - --binaryPath + - [[ .ProxyConfig.BinaryPath ]] + - --serviceCluster + [[ if ne "" (index .ObjectMeta.Labels "app") -]] + - [[ index .ObjectMeta.Labels "app" ]] + [[ else -]] + - "istio-proxy" + [[ end -]] + - --drainDuration + - [[ formatDuration .ProxyConfig.DrainDuration ]] + - --parentShutdownDuration + - [[ formatDuration .ProxyConfig.ParentShutdownDuration ]] + - --discoveryAddress + - [[ .ProxyConfig.DiscoveryAddress ]] + - --discoveryRefreshDelay + - [[ formatDuration .ProxyConfig.DiscoveryRefreshDelay ]] + - --zipkinAddress + - [[ .ProxyConfig.ZipkinAddress ]] + - --connectTimeout + - [[ formatDuration .ProxyConfig.ConnectTimeout ]] + - --statsdUdpAddress + - [[ .ProxyConfig.StatsdUdpAddress ]] + - --proxyAdminPort + - [[ .ProxyConfig.ProxyAdminPort ]] + - --controlPlaneAuthPolicy + - [[ or (index .ObjectMeta.Annotations "sidecar.istio.io/controlPlaneAuthPolicy") .ProxyConfig.ControlPlaneAuthPolicy ]] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ISTIO_META_INTERCEPTION_MODE + value: [[ or (index .ObjectMeta.Annotations "sidecar.istio.io/interceptionMode") .ProxyConfig.InterceptionMode.String ]] + imagePullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: true + [[ if eq (or (index .ObjectMeta.Annotations "sidecar.istio.io/interceptionMode") .ProxyConfig.InterceptionMode.String) "TPROXY" -]] + capabilities: + add: + - NET_ADMIN + runAsGroup: 1337 + [[ else -]] + runAsUser: 1337 + [[ end -]] + restartPolicy: Always + resources: + [[ if (isset .ObjectMeta.Annotations "sidecar.istio.io/proxyCPU") -]] + requests: + cpu: "[[ index .ObjectMeta.Annotations "sidecar.istio.io/proxyCPU" ]]" + memory: "[[ index .ObjectMeta.Annotations "sidecar.istio.io/proxyMemory" ]]" + [[ else -]] + requests: + cpu: 10m + + [[ end -]] + volumeMounts: + - mountPath: /etc/istio/proxy + name: istio-envoy + - mountPath: /etc/certs/ + name: istio-certs + readOnly: true + volumes: + - emptyDir: + medium: Memory + name: istio-envoy + - name: istio-certs + secret: + optional: true + [[ if eq .Spec.ServiceAccountName "" -]] + secretName: istio.default + [[ else -]] + secretName: [[ printf "istio.%s" .Spec.ServiceAccountName ]] + [[ end -]] diff --git a/fixtures/complex.yaml b/fixtures/complex.yaml new file mode 100644 index 0000000..8543d48 --- /dev/null +++ b/fixtures/complex.yaml @@ -0,0 +1,1156 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: knative-build +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: knative-build-admin +rules: +- apiGroups: + - "" + resources: + - pods + - namespaces + - secrets + - events + - serviceaccounts + - configmaps + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - extensions + resources: + - deployments + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - 
admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - build.knative.dev + resources: + - builds + - buildtemplates + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: build-controller + namespace: knative-build +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: build-controller-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: knative-build-admin +subjects: +- kind: ServiceAccount + name: build-controller + namespace: knative-build +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: builds.build.knative.dev +spec: + additionalPrinterColumns: + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.completionTime + name: Completed + type: date + group: build.knative.dev + names: + categories: + - all + - knative + kind: Build + plural: builds + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: buildtemplates.build.knative.dev +spec: + additionalPrinterColumns: + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.completionTime + name: Completed + type: date + group: build.knative.dev + names: + categories: + - all + - knative + kind: BuildTemplate + plural: buildtemplates + scope: Namespaced + version: v1alpha1 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: build-controller + name: build-controller + namespace: knative-build +spec: + ports: + - name: metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: build-controller +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: build-webhook + name: build-webhook + namespace: knative-build +spec: + ports: + - port: 443 + targetPort: 443 + selector: + role: build-webhook +--- +apiVersion: v1 +data: + loglevel.controller: info + loglevel.creds-init: info + loglevel.git-init: info + loglevel.webhook: info + zap-logger-config: | + { + "level": "info", + "development": false, + "sampling": { + "initial": 100, + "thereafter": 100 + }, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "", + "durationEncoder": "", + "callerEncoder": "" + } + } +kind: ConfigMap +metadata: + name: config-logging + namespace: knative-build +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: build-controller + namespace: knative-build +spec: + replicas: 1 + template: + metadata: + labels: + app: build-controller + spec: + containers: + - args: + - -builder + - cluster + - -logtostderr + - -stderrthreshold + - INFO + - -creds-image + - gcr.io/knative-releases/github.com/knative/build/cmd/creds-init@sha256:b5dff24742c5c8ac4673dc991e3f960d11b58efdf751d26c54ec5144c48eef30 + - -git-image + - gcr.io/knative-releases/github.com/knative/build/cmd/git-init@sha256:fe0d19e5da3fc9e7da20abc13d032beafcc283358a8325188dced62536a66e54 + 
image: gcr.io/knative-releases/github.com/knative/build/cmd/controller@sha256:3981b19105aabf3ed66db38c15407dc7accf026f4f4703d7e0ca7986ffd37d99 + name: build-controller + serviceAccountName: build-controller +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: build-webhook + namespace: knative-build +spec: + replicas: 1 + template: + metadata: + labels: + app: build-webhook + role: build-webhook + spec: + containers: + - args: + - -builder + - cluster + - -logtostderr + - -stderrthreshold + - INFO + image: gcr.io/knative-releases/github.com/knative/build/cmd/webhook@sha256:b9a97b7d360e10e540edfc9329e4f1c01832e58bf57d5dddea5c3a664f64bfc6 + name: build-webhook + volumeMounts: + - mountPath: /etc/config-logging + name: config-logging + serviceAccountName: build-controller + volumes: + - configMap: + name: config-logging + name: config-logging +--- +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio-injection: enabled + name: knative-serving +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: knative-serving-admin +rules: +- apiGroups: + - "" + resources: + - pods + - namespaces + - secrets + - configmaps + - endpoints + - services + - events + - serviceaccounts + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - extensions + resources: + - ingresses + - deployments + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/scale + - statefulsets + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - serving.knative.dev + resources: + - configurations + - configurationgenerations + - routes + - revisions + - revisionuids + - autoscalers + - services + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - autoscaling.internal.knative.dev + resources: + - podautoscalers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - build.knative.dev + resources: + - builds + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - networking.istio.io + resources: + - virtualservices + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: knative-serving-write +rules: +- apiGroups: + - "" + resources: + - pods + - namespaces + - secrets + - configmaps + - endpoints + - services + - events + - serviceaccounts + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - extensions + resources: + - ingresses + - deployments + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/scale + - statefulsets + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - update + - patch + - 
watch +- apiGroups: + - serving.knative.dev + resources: + - configurations + - configurationgenerations + - routes + - revisions + - revisionuids + - autoscalers + - services + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - build.knative.dev + resources: + - builds + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - networking.istio.io + resources: + - virtualservices + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller + namespace: knative-serving +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: autoscaler + namespace: knative-serving +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: knative-serving-controller-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: knative-serving-admin +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: knative-serving-autoscaler-write +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: knative-serving-write +subjects: +- kind: ServiceAccount + name: autoscaler + namespace: knative-serving +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: knative-shared-gateway + namespace: knative-serving +spec: + selector: + knative: ingressgateway + servers: + - hosts: + - '*' + port: + name: http + number: 80 + protocol: HTTP + - hosts: + - '*' + port: + name: https + number: 443 + protocol: HTTPS + tls: + mode: PASSTHROUGH +--- +apiVersion: v1 +kind: Service +metadata: + labels: + chart: ingressgateway-0.8.0 + heritage: Tiller + knative: ingressgateway + release: RELEASE-NAME + name: knative-ingressgateway + namespace: istio-system +spec: + ports: + - name: http + nodePort: 32380 + port: 80 + - name: https + nodePort: 32390 + port: 443 + - name: tcp + nodePort: 32400 + port: 32400 + selector: + knative: ingressgateway + type: LoadBalancer +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: knative-ingressgateway + chart: ingressgateway-0.8.0 + heritage: Tiller + knative: ingressgateway + release: RELEASE-NAME + name: knative-ingressgateway + namespace: istio-system +spec: + replicas: null + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + labels: + knative: ingressgateway + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + weight: 2 + - preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - ppc64le + weight: 2 + - preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - s390x + weight: 2 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - ppc64le + - s390x + containers: + - args: + - proxy + - router + - -v + - "2" + - --discoveryRefreshDelay + - 1s + - --drainDuration + - 45s + - --parentShutdownDuration + - 1m0s + - --connectTimeout + - 10s + - --serviceCluster + - knative-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --statsdUdpAddress + - istio-statsd-prom-bridge:9125 + - --proxyAdminPort + - "15000" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - 
istio-pilot:8080 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: docker.io/istio/proxyv2:0.8.0 + imagePullPolicy: IfNotPresent + name: ingressgateway + ports: + - containerPort: 80 + - containerPort: 443 + - containerPort: 32400 + resources: {} + volumeMounts: + - mountPath: /etc/certs + name: istio-certs + readOnly: true + - mountPath: /etc/istio/ingressgateway-certs + name: ingressgateway-certs + readOnly: true + serviceAccountName: istio-ingressgateway-service-account + volumes: + - name: istio-certs + secret: + optional: true + secretName: istio.default + - name: ingressgateway-certs + secret: + optional: true + secretName: istio-ingressgateway-certs +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: knative-ingressgateway + namespace: istio-system +spec: + maxReplicas: 10 + metrics: + - resource: + name: cpu + targetAverageUtilization: 60 + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1beta1 + kind: Deployment + name: knative-ingressgateway +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: configurations.serving.knative.dev +spec: + group: serving.knative.dev + names: + categories: + - all + - knative + - serving + kind: Configuration + plural: configurations + shortNames: + - config + - cfg + singular: configuration + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: podautoscalers.autoscaling.internal.knative.dev +spec: + group: autoscaling.internal.knative.dev + names: + categories: + - all + - knative-internal + - autoscaling + kind: PodAutoscaler + plural: podautoscalers + shortNames: + - kpa + singular: podautoscaler + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: revisions.serving.knative.dev +spec: + group: serving.knative.dev + names: + categories: + - all + - knative + - serving + kind: Revision + plural: revisions + shortNames: + - rev + singular: revision + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: routes.serving.knative.dev +spec: + group: serving.knative.dev + names: + categories: + - all + - knative + - serving + kind: Route + plural: routes + shortNames: + - rt + singular: route + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: services.serving.knative.dev +spec: + group: serving.knative.dev + names: + categories: + - all + - knative + - serving + kind: Service + plural: services + shortNames: + - kservice + - ksvc + singular: service + scope: Namespaced + version: v1alpha1 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: activator + name: activator-service + namespace: knative-serving +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: activator + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: controller + name: controller + namespace: knative-serving +spec: + ports: + - name: metrics + port: 9090 + 
protocol: TCP + targetPort: 9090 + selector: + app: controller +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: webhook + name: webhook + namespace: knative-serving +spec: + ports: + - port: 443 + targetPort: 443 + selector: + role: webhook +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: activator + namespace: knative-serving +spec: + replicas: 1 + template: + metadata: + annotations: + sidecar.istio.io/inject: "true" + labels: + app: activator + role: activator + spec: + containers: + - args: + - -logtostderr=false + - -stderrthreshold=FATAL + image: gcr.io/knative-releases/github.com/knative/serving/cmd/activator@sha256:e83258dd5858c8b1e92dbd413d0857ad2b22a7c4215ed911f256f68e2972f362 + name: activator + ports: + - containerPort: 8080 + name: activator-port + volumeMounts: + - mountPath: /etc/config-logging + name: config-logging + serviceAccountName: controller + volumes: + - configMap: + name: config-logging + name: config-logging +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler + name: autoscaler + namespace: knative-serving +spec: + ports: + - name: websocket + port: 8080 + protocol: TCP + targetPort: 8080 + - name: metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: autoscaler +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: autoscaler + namespace: knative-serving +spec: + replicas: 1 + template: + metadata: + annotations: + sidecar.istio.io/inject: "true" + labels: + app: autoscaler + spec: + containers: + - image: gcr.io/knative-releases/github.com/knative/serving/cmd/autoscaler@sha256:76222399addc02454db9837ea3ff54bae29849168586051a9d0180daa2c1a805 + name: autoscaler + ports: + - containerPort: 8080 + name: websocket + - containerPort: 9090 + name: metrics + volumeMounts: + - mountPath: /etc/config-autoscaler + name: config-autoscaler + - mountPath: /etc/config-logging + name: config-logging + serviceAccountName: controller + volumes: + - configMap: + name: config-autoscaler + name: config-autoscaler + - configMap: + name: config-logging + name: config-logging +--- +apiVersion: v1 +data: + concurrency-quantum-of-time: 100ms + enable-scale-to-zero: "true" + enable-vertical-pod-autoscaling: "false" + max-scale-up-rate: "10" + multi-concurrency-target: "1.0" + panic-window: 6s + scale-to-zero-grace-period: 2m + scale-to-zero-threshold: 5m + single-concurrency-target: "0.9" + stable-window: 60s + tick-interval: 2s + vpa-multi-concurrency-target: "10.0" +kind: ConfigMap +metadata: + name: config-autoscaler + namespace: knative-serving +--- +apiVersion: v1 +data: + queueSidecarImage: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:99c841aa72c2928d1bf333348a848b5afb182715a2a0441da6282c86d4be807e + registriesSkippingTagResolving: ko.local,dev.local +kind: ConfigMap +metadata: + name: config-controller + namespace: knative-serving +--- +apiVersion: v1 +data: + example.com: "" +kind: ConfigMap +metadata: + name: config-domain + namespace: knative-serving +--- +apiVersion: v1 +data: + loglevel.activator: info + loglevel.autoscaler: info + loglevel.controller: info + loglevel.queueproxy: info + loglevel.webhook: info + zap-logger-config: | + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": 
"", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } +kind: ConfigMap +metadata: + name: config-logging + namespace: knative-serving +--- +apiVersion: v1 +data: + istio.sidecar.includeOutboundIPRanges: '*' +kind: ConfigMap +metadata: + name: config-network + namespace: knative-serving +--- +apiVersion: v1 +data: + logging.enable-var-log-collection: "false" + logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + logging.fluentd-sidecar-output-config: | + # Parse json log before sending to Elastic Search + + @type parser + key_name log + + @type multi_format + + format json + time_key fluentd-time # fluentd-time is reserved for structured logs + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + format none + message_key log + + + + # Send to Elastic Search + + @id elasticsearch + @type elasticsearch + @log_level info + include_tag_key true + # Elasticsearch service is in monitoring namespace. + host elasticsearch-logging.monitoring + port 9200 + logstash_format true + + @type file + path /var/log/fluentd-buffers/kubernetes.system.buffer + flush_mode interval + retry_type exponential_backoff + flush_thread_count 2 + flush_interval 5s + retry_forever + retry_max_interval 30 + chunk_limit_size 2M + queue_limit_length 8 + overflow_action block + + + logging.revision-url-template: | + http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) +kind: ConfigMap +metadata: + name: config-observability + namespace: knative-serving +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: controller + namespace: knative-serving +spec: + replicas: 1 + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + labels: + app: controller + spec: + containers: + - image: gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:28db335f18cbd2a015fd218b9c7ce30b4366898fa3728a7f6dab6537991de028 + name: controller + ports: + - containerPort: 9090 + name: metrics + volumeMounts: + - mountPath: /etc/config-logging + name: config-logging + serviceAccountName: controller + volumes: + - configMap: + name: config-logging + name: config-logging +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: webhook + namespace: knative-serving +spec: + replicas: 1 + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + labels: + app: webhook + role: webhook + spec: + containers: + - image: gcr.io/knative-releases/github.com/knative/serving/cmd/webhook@sha256:50ea89c48f8890fbe0cee336fc5cbdadcfe6884afbe5977db5d66892095b397d + name: webhook + volumeMounts: + - mountPath: /etc/config-logging + name: config-logging + serviceAccountName: controller + volumes: + - configMap: + name: config-logging + name: config-logging +--- diff --git a/fixtures/invalid.yaml b/fixtures/invalid.yaml new file mode 100644 index 0000000..9d68933 --- /dev/null +++ b/fixtures/invalid.yaml @@ -0,0 +1 @@ +" \ No newline at end of file diff --git a/fixtures/parameterized.yaml b/fixtures/parameterized.yaml new file mode 100644 index 0000000..29b65da --- /dev/null +++ b/fixtures/parameterized.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: build.knative.dev/v1alpha1 +kind: BuildTemplate +metadata: + name: riff-cnb +spec: + parameters: + - name: IMAGE + description: The image you wish to create. For example, "repo/example", or "example.com/repo/image". 
+ - name: RUN_IMAGE + description: The run image buildpacks will use as the base for IMAGE. + default: packs/run + - name: USE_CRED_HELPERS + description: Use Docker credential helpers for Google's GCR, Amazon's ECR, or Microsoft's ACR. + default: 'true' + + steps: + - name: prepare + image: packs/base + command: ["/lifecycle/knative-helper"] + volumeMounts: + - name: app-cache + mountPath: /cache + imagePullPolicy: Always + - name: detect + image: projectriff/buildpack + command: ["/lifecycle/detector"] + imagePullPolicy: Always + - name: analyze + image: packs/util + command: ["/lifecycle/analyzer"] + args: ["${IMAGE}"] + env: + - name: PACK_USE_HELPERS + value: ${USE_CRED_HELPERS} + imagePullPolicy: Always + - name: build + image: projectriff/buildpack + command: ["/lifecycle/builder"] + volumeMounts: + - name: app-cache + mountPath: /cache + imagePullPolicy: Always + - name: export + image: packs/util + command: ["/lifecycle/exporter"] + args: ["${IMAGE}"] + env: + - name: PACK_RUN_IMAGE + value: ${RUN_IMAGE} + - name: PACK_USE_HELPERS + value: ${USE_CRED_HELPERS} + imagePullPolicy: Always + + volumes: + - name: app-cache + persistentVolumeClaim: + claimName: riff-cnb-cache \ No newline at end of file diff --git a/fixtures/simple.yaml b/fixtures/simple.yaml new file mode 100644 index 0000000..b7e5289 --- /dev/null +++ b/fixtures/simple.yaml @@ -0,0 +1,12 @@ +spec: + template: + spec: + containers: + - args: + - -builder + - cluster + - -logtostderr + - -stderrthreshold + - INFO + image: gcr.io/knative-releases/x/y + name: build-webhook diff --git a/fixtures/suffix.yaml b/fixtures/suffix.yaml new file mode 100644 index 0000000..4554ebf --- /dev/null +++ b/fixtures/suffix.yaml @@ -0,0 +1,8 @@ +data: + queueSidecarImage: gcr.io/knative-releases/e/f + registriesSkippingTagResolving: ko.local,dev.local +--- +data: + logging.enable-var-log-collection: "false" + logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + diff --git a/list.go b/list.go new file mode 100644 index 0000000..8803294 --- /dev/null +++ b/list.go @@ -0,0 +1,131 @@ +package scan + +import ( + "fmt" + "strings" + + "github.com/ghodss/yaml" + "github.com/projectriff/riff/pkg/resource" +) + +func ListImages(res string, baseDir string) ([]string, error) { + fmt.Printf("Scanning %s\n", res) + contents, err := resource.Load(res, baseDir) + if err != nil { + return nil, err + } + + imgs := []string{} + + docs := strings.Split(string(contents), "---\n") + for _, doc := range docs { + if strings.TrimSpace(doc) != "" { + y := make(map[string]interface{}) + err = yaml.Unmarshal([]byte(doc), &y) + if err != nil { + return nil, fmt.Errorf("error parsing resource file %s: %v", res, err) + } + + visitImages(y, func(imageName string) { + imgs = append(imgs, imageName) + }) + } + } + + return imgs, nil +} + +func visitImages(y interface{}, visitor func(string)) { + switch v := y.(type) { + case map[string]interface{}: + if val, ok := v["image"]; ok { + if vs, ok := val.(string); ok { + visitor(vs) + } + } + + if args, ok := v["args"]; ok { + if ar, ok := args.([]interface{}); ok { + for i, a := range ar { + if a, ok := a.(string); ok { + if strings.HasPrefix(a, "-") && strings.HasSuffix(a, "-image") && len(ar) > i+1 { + if b, ok := ar[i+1].(string); ok { + visitor(b) + } + } + } + } + } + } + + if val, ok := v["config"]; ok { + if vs, ok := val.(string); ok { + y := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(vs), &y) + if err == nil { + visitImages(y, visitor) + } + } + } + + if val, ok := v["template"]; 
ok { + if vs, ok := val.(string); ok { + // treat templates as lines each of which may contain YAML + lines := strings.Split(vs, "\n") + for _, line := range lines { + y := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(line), &y) + if err == nil { + visitImages(y, visitor) + } + } + } + } + + if parms, ok := v["parameters"]; ok { + if pr, ok := parms.([]interface{}); ok { + for _, p := range pr { + if pmap, ok := p.(map[string]interface{}); ok { + // if this parameter map has a "name" key which indicates an image and a "default" key with a + // string value, treat the value as a possible image + if name, ok := stringMapValue(pmap, "name"); ok { + if strings.HasSuffix(name, "IMAGE") { + if deflt, ok := stringMapValue(pmap, "default"); ok { + visitor(deflt) + } + } + } + } + } + } + } + + for key, val := range v { + if strings.HasSuffix(key, "Image") || strings.HasSuffix(key, "-image") { + if vs, ok := val.(string); ok { + visitor(vs) + } + } + visitImages(val, visitor) + } + case map[interface{}]interface{}: + for _, val := range v { + visitImages(val, visitor) + } + case []interface{}: + for _, u := range v { + visitImages(u, visitor) + } + default: + } +} + +func stringMapValue(m map[string]interface{}, key string) (string, bool) { + if value, ok := m[key]; ok { + if valueStr, ok := value.(string); ok { + return valueStr, true + + } + } + return "", false +} diff --git a/list_test.go b/list_test.go new file mode 100644 index 0000000..6dd0f72 --- /dev/null +++ b/list_test.go @@ -0,0 +1,132 @@ +package scan_test + +import ( + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pivotal-cf/pfs/pkg/scan" +) + +var _ = Describe("ListImages", func() { + var ( + res string + baseDir string + images []string + err error + ) + + BeforeEach(func() { + wd, err := os.Getwd() + Expect(err).NotTo(HaveOccurred()) + baseDir = filepath.Join(wd, "fixtures") + }) + + JustBeforeEach(func() { + images, err = scan.ListImages(res, baseDir) + }) + + Context("when the resource file names an image directly", func() { + BeforeEach(func() { + res = "simple.yaml" + }) + + It("should list the image", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(ConsistOf("gcr.io/knative-releases/x/y")) + }) + }) + + Context("when the resource file names images as arguments", func() { + BeforeEach(func() { + res = "arg.yaml" + }) + + It("should list the images", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(ConsistOf("gcr.io/knative-releases/a/b", "gcr.io/knative-releases/c/d")) + }) + }) + + Context("when the resource file names an image using a key ending in 'Image'", func() { + BeforeEach(func() { + res = "suffix.yaml" + }) + + It("should list the images", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(ConsistOf("gcr.io/knative-releases/e/f", "k8s.gcr.io/fluentd-elasticsearch:v2.0.4")) + }) + }) + + Context("when the resource file contains block scalars containing images", func() { + BeforeEach(func() { + res = "block.yaml" + }) + + It("should list the images", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(ConsistOf("docker.io/istio/proxy_init:1.0.1")) + }) + }) + + Context("when using a realistic resource file", func() { + BeforeEach(func() { + res = "complex.yaml" + }) + + It("should list the images in the resource file", func() { + Expect(err).NotTo(HaveOccurred()) + 
Expect(images).To(ConsistOf("gcr.io/knative-releases/github.com/knative/build/cmd/controller@sha256:3981b19105aabf3ed66db38c15407dc7accf026f4f4703d7e0ca7986ffd37d99", + "gcr.io/knative-releases/github.com/knative/build/cmd/creds-init@sha256:b5dff24742c5c8ac4673dc991e3f960d11b58efdf751d26c54ec5144c48eef30", + "gcr.io/knative-releases/github.com/knative/build/cmd/git-init@sha256:fe0d19e5da3fc9e7da20abc13d032beafcc283358a8325188dced62536a66e54", + "gcr.io/knative-releases/github.com/knative/build/cmd/webhook@sha256:b9a97b7d360e10e540edfc9329e4f1c01832e58bf57d5dddea5c3a664f64bfc6", + "docker.io/istio/proxyv2:0.8.0", + "gcr.io/knative-releases/github.com/knative/serving/cmd/activator@sha256:e83258dd5858c8b1e92dbd413d0857ad2b22a7c4215ed911f256f68e2972f362", + "gcr.io/knative-releases/github.com/knative/serving/cmd/autoscaler@sha256:76222399addc02454db9837ea3ff54bae29849168586051a9d0180daa2c1a805", + "gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:99c841aa72c2928d1bf333348a848b5afb182715a2a0441da6282c86d4be807e", + "k8s.gcr.io/fluentd-elasticsearch:v2.0.4", + "gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:28db335f18cbd2a015fd218b9c7ce30b4366898fa3728a7f6dab6537991de028", + "gcr.io/knative-releases/github.com/knative/serving/cmd/webhook@sha256:50ea89c48f8890fbe0cee336fc5cbdadcfe6884afbe5977db5d66892095b397d", + )) + }) + }) + + Context("when using a parameterized resource file", func() { + BeforeEach(func() { + res = "parameterized.yaml" + }) + + It("should list the images in the resource file", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(ConsistOf("packs/run", + "packs/base", + "projectriff/buildpack", + "projectriff/buildpack", + "packs/util", + "packs/util", + )) + }) + }) + + Context("when the resource file is not found", func() { + BeforeEach(func() { + res = "nosuch.yaml" + }) + + It("should return a suitable error", func() { + Expect(err).To(MatchError(HaveSuffix("no such file or directory"))) + }) + }) + + Context("when the resource file contains invalid YAML", func() { + BeforeEach(func() { + res = "invalid.yaml" + }) + + It("should return a suitable error", func() { + Expect(err).To(MatchError(HavePrefix("error parsing resource file"))) + }) + }) +}) diff --git a/resource_suite_test.go b/resource_suite_test.go new file mode 100644 index 0000000..fc5d35e --- /dev/null +++ b/resource_suite_test.go @@ -0,0 +1,13 @@ +package scan_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestResource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Resource Suite") +}
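
Note for anyone trying the new package locally: a minimal sketch of driving scan.ListImages from a small program. This is illustrative only and not part of the patch; it assumes the github.com/pivotal-cf/pfs/pkg/scan import path used by the tests, and "release.yaml" is a placeholder resource file. As in the tests, relative resource paths appear to be resolved against the supplied base directory.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/pivotal-cf/pfs/pkg/scan"
)

func main() {
	// Base directory against which relative resource paths are resolved,
	// mirroring how the tests point at their fixtures directory.
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// "release.yaml" stands in for any (possibly multi-document) Kubernetes
	// resource file to scan for image references.
	images, err := scan.ListImages("release.yaml", wd)
	if err != nil {
		log.Fatal(err)
	}

	for _, img := range images {
		fmt.Println(img)
	}
}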
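
On the go.mod bullet in the commit message: pinning an indirect requirement to the revision recorded in riff's Gopkg.lock and then running make lets the Go tooling rewrite the entry as a pseudo-version. A hedged illustration with a made-up module path and commit, not taken from this change:

require (
	// Pinned by commit; the tooling normalises this to a
	// v0.0.0-<timestamp>-<short-sha> pseudo-version.
	github.com/example/dependency v0.0.0-20181106000000-0123456789ab // indirect
)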