diff --git a/Gopkg.lock b/Gopkg.lock
index a2563b5b2..ac29691b7 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -128,12 +128,12 @@
   version = "v0.17.2"

 [[projects]]
-  digest = "1:dfab391de021809e0041f0ab5648da6b74dd16a685472a1b8c3dc06b3dca1ee2"
+  branch = "master"
+  digest = "1:8f80caf2fa31f78a035f33981c9685013033073b53f344f579e60fa69f0c6670"
   name = "github.com/go-openapi/spec"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "5bae59e25b21498baea7f9d46e9c147ec106a42e"
-  version = "v0.17.2"
+  revision = "53d776530bf78a11b03a7b52dd8a083086b045e5"

 [[projects]]
   digest = "1:983f95b2fae6fe8fdd361738325ed6090f4f3bd15ce4db745e899fb5b0fdfc46"
@@ -1035,8 +1035,7 @@
   version = "v0.1.0"

 [[projects]]
-  branch = "master"
-  digest = "1:67864daa8a5832b6a0a81969fd17dc00552c5970f204baa2b30f922b603f89f7"
+  digest = "1:72ebd1873445f862a522e1afeb2f6ffc0c1bfefee22721d89fedf931427f2257"
   name = "k8s.io/kube-openapi"
   packages = [
     "cmd/openapi-gen/args",
@@ -1047,7 +1046,7 @@
     "pkg/util/sets",
   ]
   pruneopts = "NUT"
-  revision = "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d"
+  revision = "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803"

 [[projects]]
   digest = "1:8b23a397f3b9fa8abd3588e83cd2db4e8b0aaf3832b8554bcdbffb33d57b3955"
@@ -1106,6 +1105,7 @@
     "k8s.io/api/batch/v1beta1",
     "k8s.io/api/core/v1",
     "k8s.io/api/extensions/v1beta1",
+    "k8s.io/api/rbac/v1",
     "k8s.io/apimachinery/pkg/api/errors",
     "k8s.io/apimachinery/pkg/api/resource",
     "k8s.io/apimachinery/pkg/apis/meta/v1",
diff --git a/README.adoc b/README.adoc
index ba5cb8ce1..48f9a84c5 100644
--- a/README.adoc
+++ b/README.adoc
@@ -221,6 +221,34 @@ spec:
 <1> Identifies the kafka configuration used by the collector, to produce the messages, and the ingester to consume the messages
 <2> The deadlock interval can be disabled to avoid the ingester being terminated when no messages arrive within the default 1 minute period

+== Elasticsearch storage
+
+If no `es.server-urls` are provided, the Jaeger Operator creates an Elasticsearch CR based on the configuration
+provided in the storage section. Make sure the link:https://github.com/openshift/elasticsearch-operator[elasticsearch-operator]
+is running in your cluster, otherwise the Elasticsearch deployment will not be created. The Elasticsearch cluster is meant
+to be dedicated to a single Jaeger instance.
+
+At the moment there can be only one Jaeger instance with Elasticsearch in a namespace.
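+
+A minimal CR that provisions a dedicated Elasticsearch cluster looks like the following
+(adapted from `deploy/examples/simple-prod-deploy-es.yaml`, added in this change):
+
+[source,yaml]
+----
+apiVersion: io.jaegertracing/v1alpha1
+kind: Jaeger
+metadata:
+  name: simple-prod
+spec:
+  strategy: production
+  storage:
+    type: elasticsearch
+    elasticsearch:
+      nodeCount: 2
+----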
+
+Note that Elasticsearch requires the virtual memory setting `vm.max_map_count` to be increased, e.g. on minikube: `minikube ssh -- 'sudo sysctl -w vm.max_map_count=262144'`
+
 == Accessing the UI

 === Kubernetes
diff --git a/build/Dockerfile b/build/Dockerfile
index f3e088a2d..dae161d0d 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -1,4 +1,15 @@
-FROM alpine:3.8
+FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base
+
+RUN INSTALL_PKGS=" \
+      openssl \
+      " && \
+    yum install -y $INSTALL_PKGS && \
+    rpm -V $INSTALL_PKGS && \
+    yum clean all && \
+    mkdir /tmp/_working_dir && \
+    chmod og+w /tmp/_working_dir
+
+COPY scripts/* /scripts/

 USER nobody
diff --git a/deploy/examples/simple-prod-deploy-es.yaml b/deploy/examples/simple-prod-deploy-es.yaml
new file mode 100644
index 000000000..911d40f96
--- /dev/null
+++ b/deploy/examples/simple-prod-deploy-es.yaml
@@ -0,0 +1,16 @@
+# This CR deploys Jaeger and Elasticsearch
+apiVersion: io.jaegertracing/v1alpha1
+kind: Jaeger
+metadata:
+  name: simple-prod
+spec:
+  strategy: production
+  storage:
+    type: elasticsearch
+    elasticsearch:
+      nodeCount: 2
+      resources:
+    esIndexCleaner:
+      enabled: true
+      schedule: "*/30 * * * *"
+      numberOfDays: 1
diff --git a/deploy/role.yaml b/deploy/role.yaml
index 798a79062..12ea4f262 100644
--- a/deploy/role.yaml
+++ b/deploy/role.yaml
@@ -59,3 +59,16 @@ rules:
   - routes
   verbs:
   - "*"
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  - rolebindings
+  verbs:
+  - '*'
+- apiGroups:
+  - elasticsearch.jaegertracing.io
+  resources:
+  - jaeger
+  verbs:
+  - 'get'
diff --git a/pkg/account/main.go b/pkg/account/main.go
index badcd0f4d..f7e4177a9 100644
--- a/pkg/account/main.go
+++ b/pkg/account/main.go
@@ -1,7 +1,8 @@
 package account

 import (
 	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1"
 )
@@ -12,5 +13,29 @@ func Get(jaeger *v1alpha1.Jaeger) []*v1.ServiceAccount {
 	if jaeger.Spec.Ingress.Security == v1alpha1.IngressSecurityOAuthProxy {
 		accounts = append(accounts, OAuthProxy(jaeger))
 	}
-	return accounts
+	return append(accounts, getMain(jaeger))
+}
+
+func getMain(jaeger *v1alpha1.Jaeger) *v1.ServiceAccount {
+	trueVar := true
+	return &v1.ServiceAccount{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      JaegerServiceAccountFor(jaeger),
+			Namespace: jaeger.Namespace,
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: jaeger.APIVersion,
+					Kind:       jaeger.Kind,
+					Name:       jaeger.Name,
+					UID:        jaeger.UID,
+					Controller: &trueVar,
+				},
+			},
+		},
+	}
+}
+
+// JaegerServiceAccountFor returns the name of the service account used by the Jaeger instance's own components
+func JaegerServiceAccountFor(jaeger *v1alpha1.Jaeger) string {
+	return jaeger.Name
+}
diff --git a/pkg/account/main_test.go b/pkg/account/main_test.go
index b2d3de797..dcb23aa4d 100644
--- a/pkg/account/main_test.go
+++ b/pkg/account/main_test.go
@@ -11,19 +11,27 @@ import (
 func TestWithSecurityNil(t *testing.T) {
 	jaeger := v1alpha1.NewJaeger("TestWithOAuthProxyNil")
 	assert.Equal(t, v1alpha1.IngressSecurityNone, jaeger.Spec.Ingress.Security)
-	assert.Len(t, Get(jaeger), 0)
+	sas := Get(jaeger)
+	assert.Len(t, sas, 1)
+	assert.Equal(t, getMain(jaeger), sas[0])
 }

 func TestWithSecurityNone(t *testing.T) {
 	jaeger := v1alpha1.NewJaeger("TestWithOAuthProxyFalse")
 	jaeger.Spec.Ingress.Security = v1alpha1.IngressSecurityNone
-
-	assert.Len(t, Get(jaeger), 0)
+	sas := Get(jaeger)
+	assert.Len(t, sas, 1)
+	assert.Equal(t, getMain(jaeger), sas[0])
 }

 func TestWithSecurityOAuthProxy(t *testing.T) {
 	jaeger := v1alpha1.NewJaeger("TestWithOAuthProxyTrue")
 	jaeger.Spec.Ingress.Security = v1alpha1.IngressSecurityOAuthProxy
-	assert.Len(t, Get(jaeger), 1)
+	assert.Len(t, Get(jaeger), 2)
+}
+
+func TestJaegerName(t *testing.T) {
+	jaeger := v1alpha1.NewJaeger("foo")
+	assert.Equal(t, "foo", JaegerServiceAccountFor(jaeger))
 }
diff --git a/pkg/apis/io/v1alpha1/jaeger_types.go b/pkg/apis/io/v1alpha1/jaeger_types.go
index a0839ab1d..5e20bfd37 100644
--- a/pkg/apis/io/v1alpha1/jaeger_types.go
+++ b/pkg/apis/io/v1alpha1/jaeger_types.go
@@ -3,6 +3,8 @@ package v1alpha1
 import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	esv1alpha1 "github.com/jaegertracing/jaeger-operator/pkg/storage/elasticsearch/v1alpha1"
 )

 // IngressSecurityType represents the possible values for the security type
@@ -136,6 +138,16 @@ type JaegerStorageSpec struct {
 	CassandraCreateSchema JaegerCassandraCreateSchemaSpec `json:"cassandraCreateSchema"`
 	SparkDependencies     JaegerDependenciesSpec          `json:"dependencies"`
 	EsIndexCleaner        JaegerEsIndexCleanerSpec        `json:"esIndexCleaner"`
+	Elasticsearch         ElasticsearchSpec               `json:"elasticsearch"`
+}
+
+// ElasticsearchSpec represents the self-provisioned Elasticsearch cluster configuration handed to the elasticsearch-operator
+type ElasticsearchSpec struct {
+	Resources        v1.ResourceRequirements             `json:"resources"`
+	NodeCount        int32                               `json:"nodeCount"`
+	NodeSelector     map[string]string                   `json:"nodeSelector,omitempty"`
+	Storage          esv1alpha1.ElasticsearchStorageSpec `json:"storage"`
+	RedundancyPolicy esv1alpha1.RedundancyPolicyType     `json:"redundancyPolicy"`
 }

 // JaegerCassandraCreateSchemaSpec holds the options related to the create-schema batch job
diff --git a/pkg/apis/io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/io/v1alpha1/zz_generated.deepcopy.go
index c5ea5e67c..a50cfa620 100644
--- a/pkg/apis/io/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/io/v1alpha1/zz_generated.deepcopy.go
@@ -25,6 +25,31 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	in.Storage.DeepCopyInto(&out.Storage)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec.
+func (in *ElasticsearchSpec) DeepCopy() *ElasticsearchSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ElasticsearchSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *FreeForm) DeepCopyInto(out *FreeForm) {
 	*out = *in
@@ -386,6 +411,7 @@ func (in *JaegerStorageSpec) DeepCopyInto(out *JaegerStorageSpec) {
 	in.CassandraCreateSchema.DeepCopyInto(&out.CassandraCreateSchema)
 	in.SparkDependencies.DeepCopyInto(&out.SparkDependencies)
 	in.EsIndexCleaner.DeepCopyInto(&out.EsIndexCleaner)
+	in.Elasticsearch.DeepCopyInto(&out.Elasticsearch)
 	return
 }
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index cc03cc1c6..55687b9c1 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -2,7 +2,10 @@ package controller

 import (
 	routev1 "github.com/openshift/api/route/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+
+	esv1alpha1 "github.com/jaegertracing/jaeger-operator/pkg/storage/elasticsearch/v1alpha1"
 )

 // AddToManagerFuncs is a list of functions to add all Controllers to the Manager
@@ -13,6 +16,9 @@ func AddToManager(m manager.Manager) error {
 	if err := routev1.AddToScheme(m.GetScheme()); err != nil {
 		return err
 	}
+	// TODO temporary fix, see https://github.com/jaegertracing/jaeger-operator/issues/206
+	gv := schema.GroupVersion{Group: "logging.openshift.io", Version: "v1alpha1"}
+	m.GetScheme().AddKnownTypes(gv, &esv1alpha1.Elasticsearch{})

 	for _, f := range AddToManagerFuncs {
 		if err := f(m); err != nil {
diff --git a/pkg/cronjob/es_index_cleaner.go b/pkg/cronjob/es_index_cleaner.go
index 7fd41f2e1..742bcac92 100644
--- a/pkg/cronjob/es_index_cleaner.go
+++ b/pkg/cronjob/es_index_cleaner.go
@@ -16,6 +16,7 @@ func CreateEsIndexCleaner(jaeger *v1alpha1.Jaeger) *batchv1beta1.CronJob {
 	esUrls := getEsHostname(jaeger.Spec.Storage.Options.Map())
 	trueVar := true
+	one := int32(1)
 	name := fmt.Sprintf("%s-es-index-cleaner", jaeger.Name)
 	return &batchv1beta1.CronJob{
 		ObjectMeta: metav1.ObjectMeta{
@@ -35,6 +36,7 @@
 			Schedule: jaeger.Spec.Storage.EsIndexCleaner.Schedule,
 			JobTemplate: batchv1beta1.JobTemplateSpec{
 				Spec: batchv1.JobSpec{
+					Parallelism: &one,
 					Template: v1.PodTemplateSpec{
 						Spec: v1.PodSpec{
 							Containers: []v1.Container{
diff --git a/pkg/cronjob/spark_dependencies.go b/pkg/cronjob/spark_dependencies.go
index 5efe4d28c..4051be399 100644
--- a/pkg/cronjob/spark_dependencies.go
+++ b/pkg/cronjob/spark_dependencies.go
@@ -29,6 +29,7 @@ func CreateSparkDependencies(jaeger *v1alpha1.Jaeger) *batchv1beta1.CronJob {
 	envVars = append(envVars, getStorageEnvs(jaeger.Spec.Storage)...)
trueVar := true + one := int32(1) name := fmt.Sprintf("%s-spark-dependencies", jaeger.Name) return &batchv1beta1.CronJob{ ObjectMeta: metav1.ObjectMeta{ @@ -45,9 +46,11 @@ func CreateSparkDependencies(jaeger *v1alpha1.Jaeger) *batchv1beta1.CronJob { }, }, Spec: batchv1beta1.CronJobSpec{ - Schedule: jaeger.Spec.Storage.SparkDependencies.Schedule, + ConcurrencyPolicy: batchv1beta1.ForbidConcurrent, + Schedule: jaeger.Spec.Storage.SparkDependencies.Schedule, JobTemplate: batchv1beta1.JobTemplateSpec{ Spec: batchv1.JobSpec{ + Parallelism: &one, Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{ diff --git a/pkg/deployment/all-in-one.go b/pkg/deployment/all-in-one.go index dfdcbc502..8d6eb1786 100644 --- a/pkg/deployment/all-in-one.go +++ b/pkg/deployment/all-in-one.go @@ -10,6 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/jaegertracing/jaeger-operator/pkg/account" "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" "github.com/jaegertracing/jaeger-operator/pkg/config/sampling" "github.com/jaegertracing/jaeger-operator/pkg/config/ui" @@ -157,7 +158,8 @@ func (a *AllInOne) Get() *appsv1.Deployment { }, Resources: commonSpec.Resources, }}, - Volumes: commonSpec.Volumes, + Volumes: commonSpec.Volumes, + ServiceAccountName: account.JaegerServiceAccountFor(a.jaeger), }, }, }, diff --git a/pkg/deployment/collector.go b/pkg/deployment/collector.go index 84ca66a0c..5b6999901 100644 --- a/pkg/deployment/collector.go +++ b/pkg/deployment/collector.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/jaegertracing/jaeger-operator/pkg/account" "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" "github.com/jaegertracing/jaeger-operator/pkg/config/sampling" "github.com/jaegertracing/jaeger-operator/pkg/service" @@ -147,7 +148,8 @@ func (c *Collector) Get() *appsv1.Deployment { }, Resources: commonSpec.Resources, }}, - Volumes: commonSpec.Volumes, + Volumes: commonSpec.Volumes, + ServiceAccountName: account.JaegerServiceAccountFor(c.jaeger), }, }, }, diff --git a/pkg/deployment/ingester.go b/pkg/deployment/ingester.go index 251ca15b0..fb9436821 100644 --- a/pkg/deployment/ingester.go +++ b/pkg/deployment/ingester.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/jaegertracing/jaeger-operator/pkg/account" "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" "github.com/jaegertracing/jaeger-operator/pkg/storage" "github.com/jaegertracing/jaeger-operator/pkg/util" @@ -133,7 +134,8 @@ func (i *Ingester) Get() *appsv1.Deployment { }, Resources: commonSpec.Resources, }}, - Volumes: commonSpec.Volumes, + Volumes: commonSpec.Volumes, + ServiceAccountName: account.JaegerServiceAccountFor(i.jaeger), }, }, }, diff --git a/pkg/deployment/query.go b/pkg/deployment/query.go index 78ced973a..90f2a0b43 100644 --- a/pkg/deployment/query.go +++ b/pkg/deployment/query.go @@ -10,6 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/jaegertracing/jaeger-operator/pkg/account" "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" "github.com/jaegertracing/jaeger-operator/pkg/config/ui" "github.com/jaegertracing/jaeger-operator/pkg/service" @@ -132,7 +133,8 @@ func (q *Query) Get() *appsv1.Deployment { }, Resources: commonSpec.Resources, }}, - Volumes: commonSpec.Volumes, + 
Volumes: commonSpec.Volumes,
+					Volumes:            commonSpec.Volumes,
+					ServiceAccountName: account.JaegerServiceAccountFor(q.jaeger),
 				},
 			},
 		},
diff --git a/pkg/storage/elasticsearch.go b/pkg/storage/elasticsearch.go
new file mode 100644
index 000000000..40d4113ee
--- /dev/null
+++ b/pkg/storage/elasticsearch.go
@@ -0,0 +1,136 @@
+package storage
+
+import (
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1"
+	esv1alpha1 "github.com/jaegertracing/jaeger-operator/pkg/storage/elasticsearch/v1alpha1"
+)
+
+const (
+	// #nosec   G101: Potential hardcoded credentials (Confidence: LOW, Severity: HIGH)
+	k8sTokenFile     = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+	volumeName       = "certs"
+	volumeMountPath  = "/certs"
+	caPath           = volumeMountPath + "/ca"
+	keyPath          = volumeMountPath + "/key"
+	certPath         = volumeMountPath + "/cert"
+	elasticsearchUrl = "https://elasticsearch:9200"
+)
+
+// ShouldDeployElasticsearch determines whether a dedicated Elasticsearch cluster
+// should be provisioned: the storage type is elasticsearch and no external
+// es.server-urls option was supplied
+func ShouldDeployElasticsearch(s v1alpha1.JaegerStorageSpec) bool {
+	if !strings.EqualFold(s.Type, "elasticsearch") {
+		return false
+	}
+	_, ok := s.Options.Map()["es.server-urls"]
+	return !ok
+}
+
+// ElasticsearchDeployment represents the self-provisioned Elasticsearch cluster of a Jaeger instance
+type ElasticsearchDeployment struct {
+	Jaeger *v1alpha1.Jaeger
+}
+
+// InjectStorageConfiguration mounts the certificate secret and points the Jaeger container at the provisioned cluster
+func (ed *ElasticsearchDeployment) InjectStorageConfiguration(p *v1.PodSpec) {
+	p.Volumes = append(p.Volumes, v1.Volume{
+		Name: volumeName,
+		VolumeSource: v1.VolumeSource{
+			Secret: &v1.SecretVolumeSource{
+				SecretName: secretName(ed.Jaeger.Name, jaegerSecret.name),
+			},
+		},
+	})
+	// we assume jaeger containers are first
+	if len(p.Containers) > 0 {
+		// TODO add to archive storage if it is enabled?
+		p.Containers[0].Args = append(p.Containers[0].Args,
+			"--es.server-urls="+elasticsearchUrl,
+			"--es.token-file="+k8sTokenFile,
+			"--es.tls.ca="+caPath)
+		p.Containers[0].VolumeMounts = append(p.Containers[0].VolumeMounts, v1.VolumeMount{
+			Name:      volumeName,
+			ReadOnly:  true,
+			MountPath: volumeMountPath,
+		})
+	}
+}
+
+// InjectIndexCleanerConfiguration mounts the curator certificate secret and points the index cleaner job at the provisioned cluster
+func (ed *ElasticsearchDeployment) InjectIndexCleanerConfiguration(p *v1.PodSpec) {
+	p.Volumes = append(p.Volumes, v1.Volume{
+		Name: volumeName,
+		VolumeSource: v1.VolumeSource{
+			Secret: &v1.SecretVolumeSource{
+				SecretName: secretName(ed.Jaeger.Name, curatorSecret.name),
+			},
+		},
+	})
+	// we assume jaeger containers are first
+	if len(p.Containers) > 0 {
+		// the args array always has two elements; the second one is the Elasticsearch URL
+		p.Containers[0].Args[1] = elasticsearchUrl
+		p.Containers[0].Env = append(p.Containers[0].Env,
+			v1.EnvVar{Name: "ES_TLS", Value: "true"},
+			v1.EnvVar{Name: "ES_TLS_CA", Value: caPath},
+			v1.EnvVar{Name: "ES_TLS_KEY", Value: keyPath},
+			v1.EnvVar{Name: "ES_TLS_CERT", Value: certPath},
+		)
+		p.Containers[0].VolumeMounts = append(p.Containers[0].VolumeMounts, v1.VolumeMount{
+			Name:      volumeName,
+			ReadOnly:  true,
+			MountPath: volumeMountPath,
+		})
+	}
+}
+
+// CreateElasticsearchObjects returns the certificate secrets, RBAC roles and Elasticsearch CR for the given service accounts
+func (ed *ElasticsearchDeployment) CreateElasticsearchObjects(serviceAccounts ...string) ([]runtime.Object, error) {
+	err := createESCerts(certScript)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create Elasticsearch certificates")
+	}
+	os := []runtime.Object{}
+	esSecret := createESSecrets(ed.Jaeger)
+	for _, s := range esSecret {
+		os = append(os, s)
+	}
+	os = append(os, getESRoles(ed.Jaeger, serviceAccounts...)...)
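+	// the Elasticsearch CR itself; the elasticsearch-operator running in the cluster reconciles it into an actual deployment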
+	os = append(os, ed.createCr())
+	return os, nil
+}
+
+// createCr returns the Elasticsearch CR to be reconciled by the elasticsearch-operator
+func (ed *ElasticsearchDeployment) createCr() *esv1alpha1.Elasticsearch {
+	return &esv1alpha1.Elasticsearch{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:       ed.Jaeger.Namespace,
+			Name:            esSecret.name,
+			OwnerReferences: []metav1.OwnerReference{asOwner(ed.Jaeger)},
+		},
+		Spec: esv1alpha1.ElasticsearchSpec{
+			ManagementState:  esv1alpha1.ManagementStateManaged,
+			RedundancyPolicy: ed.Jaeger.Spec.Storage.Elasticsearch.RedundancyPolicy,
+			Spec: esv1alpha1.ElasticsearchNodeSpec{
+				Resources: ed.Jaeger.Spec.Storage.Elasticsearch.Resources,
+			},
+			Nodes: []esv1alpha1.ElasticsearchNode{
+				{
+					NodeCount:    ed.Jaeger.Spec.Storage.Elasticsearch.NodeCount,
+					Storage:      ed.Jaeger.Spec.Storage.Elasticsearch.Storage,
+					NodeSelector: ed.Jaeger.Spec.Storage.Elasticsearch.NodeSelector,
+					Roles:        []esv1alpha1.ElasticsearchNodeRole{esv1alpha1.ElasticsearchRoleClient, esv1alpha1.ElasticsearchRoleData, esv1alpha1.ElasticsearchRoleMaster},
+				},
+			},
+		},
+	}
+}
diff --git a/pkg/storage/elasticsearch/v1alpha1/types.go b/pkg/storage/elasticsearch/v1alpha1/types.go
new file mode 100644
index 000000000..b8fb683fd
--- /dev/null
+++ b/pkg/storage/elasticsearch/v1alpha1/types.go
@@ -0,0 +1,206 @@
+package v1alpha1
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TODO remove this file, it's temporarily copied from es-operator due to an old SDK dependency
+// https://github.com/jaegertracing/jaeger-operator/issues/206
+
+const (
+	ServiceAccountName string = "elasticsearch"
+	ConfigMapName      string = "elasticsearch"
+	SecretName         string = "elasticsearch"
+)
+
+// ElasticsearchList struct represents list of Elasticsearch objects
+type ElasticsearchList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	Items           []Elasticsearch `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Elasticsearch struct represents Elasticsearch cluster CRD
+type Elasticsearch struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+	Spec              ElasticsearchSpec   `json:"spec"`
+	Status            ElasticsearchStatus `json:"status,omitempty"`
+}
+
+// RedundancyPolicyType controls number of elasticsearch replica shards
+type RedundancyPolicyType string
+
+const (
+	// FullRedundancy - each index is fully replicated on every Data node in the cluster
+	FullRedundancy RedundancyPolicyType = "FullRedundancy"
+	// MultipleRedundancy - each index is spread over half of the Data nodes
+	MultipleRedundancy RedundancyPolicyType = "MultipleRedundancy"
+	// SingleRedundancy - one replica shard
+	SingleRedundancy RedundancyPolicyType = "SingleRedundancy"
+	// ZeroRedundancy - no replica shards
+	ZeroRedundancy RedundancyPolicyType = "ZeroRedundancy"
+)
+
+// ElasticsearchSpec struct represents the Spec of Elasticsearch cluster CRD
+type ElasticsearchSpec struct {
+	// managementState indicates whether and how the operator should manage the component
+	ManagementState  ManagementState       `json:"managementState"`
+	RedundancyPolicy RedundancyPolicyType  `json:"redundancyPolicy"`
+	Nodes            []ElasticsearchNode   `json:"nodes"`
+	Spec             ElasticsearchNodeSpec `json:"nodeSpec"`
+}
+
+// ElasticsearchNode struct represents individual node in Elasticsearch cluster
+type ElasticsearchNode struct {
+	Roles        []ElasticsearchNodeRole `json:"roles"`
+	NodeCount    int32                   `json:"nodeCount"`
+	Resources    
v1.ResourceRequirements `json:"resources"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Storage ElasticsearchStorageSpec `json:"storage"` +} + +type ElasticsearchStorageSpec struct { + StorageClassName string `json:"storageClassName,omitempty"` + Size *resource.Quantity `json:"size,omitempty"` +} + +// ElasticsearchNodeStatus represents the status of individual Elasticsearch node +type ElasticsearchNodeStatus struct { + DeploymentName string `json:"deploymentName,omitempty"` + ReplicaSetName string `json:"replicaSetName,omitempty"` + StatefulSetName string `json:"statefulSetName,omitempty"` + PodName string `json:"podName,omitempty"` + Status string `json:"status,omitempty"` + UpgradeStatus ElasticsearchNodeUpgradeStatus `json:"upgradeStatus,omitempty"` + Roles []ElasticsearchNodeRole `json:"roles,omitempty"` +} + +type ElasticsearchNodeUpgradeStatus struct { + UnderUpgrade UpgradeStatus `json:"underUpgrade,omitempty"` + UpgradePhase ElasticsearchUpgradePhase `json:"upgradePhase,omitempty"` +} + +type ElasticsearchUpgradePhase string + +const ( + NodeRestarting ElasticsearchUpgradePhase = "nodeRestarting" + RecoveringData ElasticsearchUpgradePhase = "recoveringData" + ControllerUpdated ElasticsearchUpgradePhase = "controllerUpdated" +) + +// ElasticsearchNodeSpec represents configuration of an individual Elasticsearch node +type ElasticsearchNodeSpec struct { + Image string `json:"image,omitempty"` + Resources v1.ResourceRequirements `json:"resources"` +} + +type UpgradeStatus string + +const ( + UnderUpgradeTrue UpgradeStatus = "True" + UnderUpgradeFalse UpgradeStatus = "False" +) + +type ElasticsearchRequiredAction string + +const ( + ElasticsearchActionRollingRestartNeeded ElasticsearchRequiredAction = "RollingRestartNeeded" + ElasticsearchActionFullRestartNeeded ElasticsearchRequiredAction = "FullRestartNeeded" + ElasticsearchActionInterventionNeeded ElasticsearchRequiredAction = "InterventionNeeded" + ElasticsearchActionNewClusterNeeded ElasticsearchRequiredAction = "NewClusterNeeded" + ElasticsearchActionNone ElasticsearchRequiredAction = "ClusterOK" + ElasticsearchActionScaleDownNeeded ElasticsearchRequiredAction = "ScaleDownNeeded" +) + +type ElasticsearchNodeRole string + +const ( + ElasticsearchRoleClient ElasticsearchNodeRole = "client" + ElasticsearchRoleData ElasticsearchNodeRole = "data" + ElasticsearchRoleMaster ElasticsearchNodeRole = "master" +) + +type ShardAllocationState string + +const ( + ShardAllocationTrue ShardAllocationState = "True" + ShardAllocationFalse ShardAllocationState = "False" +) + +// ElasticsearchStatus represents the status of Elasticsearch cluster +type ElasticsearchStatus struct { + Nodes []ElasticsearchNodeStatus `json:"nodes"` + ClusterHealth string `json:"clusterHealth"` + ShardAllocationEnabled ShardAllocationState `json:"shardAllocationEnabled"` + Pods map[ElasticsearchNodeRole]PodStateMap `json:"pods"` + Conditions []ClusterCondition `json:"conditions"` +} + +type PodStateMap map[PodStateType][]string + +type PodStateType string + +const ( + PodStateTypeReady PodStateType = "ready" + PodStateTypeNotReady PodStateType = "notReady" + PodStateTypeFailed PodStateType = "failed" +) + +type ManagementState string + +const ( + // Managed means that the operator is actively managing its resources and trying to keep the component active. 
+	// It will only upgrade the component if it is safe to do so
+	ManagementStateManaged ManagementState = "Managed"
+	// Unmanaged means that the operator will not take any action related to the component
+	ManagementStateUnmanaged ManagementState = "Unmanaged"
+)
+
+// ClusterCondition contains details for the current condition of this elasticsearch cluster.
+type ClusterCondition struct {
+	// Type is the type of the condition.
+	Type ClusterConditionType `json:"type"`
+	// Status is the status of the condition.
+	Status ConditionStatus `json:"status"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// ClusterConditionType is a valid value for ClusterCondition.Type
+type ClusterConditionType string
+
+// These are valid conditions for an elasticsearch node
+const (
+	UpdatingSettings ClusterConditionType = "UpdatingSettings"
+	ScalingUp        ClusterConditionType = "ScalingUp"
+	ScalingDown      ClusterConditionType = "ScalingDown"
+	Restarting       ClusterConditionType = "Restarting"
+)
+
+type ConditionStatus string
+
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+type ClusterEvent string
+
+const (
+	ScaledDown            ClusterEvent = "ScaledDown"
+	ScaledUp              ClusterEvent = "ScaledUp"
+	UpdateClusterSettings ClusterEvent = "UpdateClusterSettings"
+	NoEvent               ClusterEvent = "NoEvent"
+)
diff --git a/pkg/storage/elasticsearch/v1alpha1/zz_generated.deepcopy.go b/pkg/storage/elasticsearch/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..8c0975034
--- /dev/null
+++ b/pkg/storage/elasticsearch/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,305 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TODO remove this file, it's temporarily copied from es-operator due to an old SDK dependency
+// https://github.com/jaegertracing/jaeger-operator/issues/206
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition.
+func (in *ClusterCondition) DeepCopy() *ClusterCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Elasticsearch) DeepCopyInto(out *Elasticsearch) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Elasticsearch.
+func (in *Elasticsearch) DeepCopy() *Elasticsearch { + if in == nil { + return nil + } + out := new(Elasticsearch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Elasticsearch) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchList) DeepCopyInto(out *ElasticsearchList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Elasticsearch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchList. +func (in *ElasticsearchList) DeepCopy() *ElasticsearchList { + if in == nil { + return nil + } + out := new(ElasticsearchList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ElasticsearchList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchNode) DeepCopyInto(out *ElasticsearchNode) { + *out = *in + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]ElasticsearchNodeRole, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Storage.DeepCopyInto(&out.Storage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchNode. +func (in *ElasticsearchNode) DeepCopy() *ElasticsearchNode { + if in == nil { + return nil + } + out := new(ElasticsearchNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchNodeSpec) DeepCopyInto(out *ElasticsearchNodeSpec) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchNodeSpec. +func (in *ElasticsearchNodeSpec) DeepCopy() *ElasticsearchNodeSpec { + if in == nil { + return nil + } + out := new(ElasticsearchNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchNodeStatus) DeepCopyInto(out *ElasticsearchNodeStatus) { + *out = *in + out.UpgradeStatus = in.UpgradeStatus + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]ElasticsearchNodeRole, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchNodeStatus. 
+func (in *ElasticsearchNodeStatus) DeepCopy() *ElasticsearchNodeStatus { + if in == nil { + return nil + } + out := new(ElasticsearchNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchNodeUpgradeStatus) DeepCopyInto(out *ElasticsearchNodeUpgradeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchNodeUpgradeStatus. +func (in *ElasticsearchNodeUpgradeStatus) DeepCopy() *ElasticsearchNodeUpgradeStatus { + if in == nil { + return nil + } + out := new(ElasticsearchNodeUpgradeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]ElasticsearchNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec. +func (in *ElasticsearchSpec) DeepCopy() *ElasticsearchSpec { + if in == nil { + return nil + } + out := new(ElasticsearchSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]ElasticsearchNodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make(map[ElasticsearchNodeRole]PodStateMap, len(*in)) + for key, val := range *in { + var outVal map[PodStateType][]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + (*out)[key] = outVal + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStatus. +func (in *ElasticsearchStatus) DeepCopy() *ElasticsearchStatus { + if in == nil { + return nil + } + out := new(ElasticsearchStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchStorageSpec) DeepCopyInto(out *ElasticsearchStorageSpec) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStorageSpec. 
+func (in *ElasticsearchStorageSpec) DeepCopy() *ElasticsearchStorageSpec { + if in == nil { + return nil + } + out := new(ElasticsearchStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PodStateMap) DeepCopyInto(out *PodStateMap) { + { + in := &in + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStateMap. +func (in PodStateMap) DeepCopy() PodStateMap { + if in == nil { + return nil + } + out := new(PodStateMap) + in.DeepCopyInto(out) + return *out +} diff --git a/pkg/storage/elasticsearch_role.go b/pkg/storage/elasticsearch_role.go new file mode 100644 index 000000000..c5c934927 --- /dev/null +++ b/pkg/storage/elasticsearch_role.go @@ -0,0 +1,54 @@ +package storage + +import ( + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" +) + +func getESRoles(jaeger *v1alpha1.Jaeger, sas ...string) []runtime.Object { + roleName := fmt.Sprintf("%s-elasticsearch", jaeger.Name) + r := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{rbacv1.AutoUpdateAnnotationKey: "true"}, + Name: roleName, + Namespace: jaeger.Namespace, + OwnerReferences: []metav1.OwnerReference{asOwner(jaeger)}, + }, + Rules: []rbacv1.PolicyRule{ + { + // These values are virtual and defined in SearchGuard sg_config.yml under subjectAccessReviews + // The SG invokes this API to allow the request + // TOKEN=$(oc serviceaccounts get-token jaeger-simple-prod) + // curl -k -v -XPOST -H "Content-Type: application/json" -H "Authorization: Bearer $TOKEN" https://127.0.0.1:8443/apis/authorization.k8s.io/v1/selfsubjectaccessreviews -d '{"kind":"SelfSubjectAccessReview","apiVersion":"authorization.k8s.io/v1","spec":{"resourceAttributes":{"group":"jaeger.openshift.io","verb":"get","resource":"jaeger"}}}' + APIGroups: []string{"elasticsearch.jaegertracing.io"}, + Resources: []string{"jaeger"}, + Verbs: []string{"get"}, + }, + }, + } + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: jaeger.Namespace, + OwnerReferences: []metav1.OwnerReference{asOwner(jaeger)}, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: roleName, + }, + } + for _, sa := range sas { + sb := rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: jaeger.Namespace, + Name: sa, + } + rb.Subjects = append(rb.Subjects, sb) + } + return []runtime.Object{r, rb} +} diff --git a/pkg/storage/elasticsearch_role_test.go b/pkg/storage/elasticsearch_role_test.go new file mode 100644 index 000000000..03668a0aa --- /dev/null +++ b/pkg/storage/elasticsearch_role_test.go @@ -0,0 +1,50 @@ +package storage + +import ( + "testing" + + "github.com/stretchr/testify/assert" + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" +) + +func TestGetESRoles_NoDeployment(t *testing.T) { + j := v1alpha1.NewJaeger("foo") + roles := getESRoles(j) + assert.Equal(t, 2, len(roles)) + r := roles[0].(*rbacv1.Role) + assert.Equal(t, []rbacv1.PolicyRule{ + { + Verbs: []string{"get"}, + Resources: []string{"jaeger"}, 
+ APIGroups: []string{"elasticsearch.jaegertracing.io"}, + }, + }, r.Rules) + rb := roles[1].(*rbacv1.RoleBinding) + assert.Equal(t, 0, len(rb.Subjects)) +} + +func TestGetESRoles_ServiceAccount(t *testing.T) { + j := v1alpha1.NewJaeger("foo") + j.Namespace = "myproject" + roles := getESRoles(j, "bar") + assert.Equal(t, 2, len(roles)) + r := roles[0].(*rbacv1.Role) + assert.Equal(t, []rbacv1.PolicyRule{ + { + Verbs: []string{"get"}, + Resources: []string{"jaeger"}, + APIGroups: []string{"elasticsearch.jaegertracing.io"}, + }, + }, r.Rules) + rb := roles[1].(*rbacv1.RoleBinding) + assert.Equal(t, "foo-elasticsearch", rb.Name) + assert.Equal(t, []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: "bar", + Namespace: "myproject", + }, + }, rb.Subjects) +} diff --git a/pkg/storage/elasticsearch_secrets.go b/pkg/storage/elasticsearch_secrets.go new file mode 100644 index 000000000..070a1128c --- /dev/null +++ b/pkg/storage/elasticsearch_secrets.go @@ -0,0 +1,153 @@ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + + "github.com/operator-framework/operator-sdk/pkg/k8sutil" + "github.com/sirupsen/logrus" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" +) + +const ( + workingDir = "/tmp/_working_dir" + certScript = "./scripts/cert_generation.sh" +) + +type secret struct { + name string + content map[string]string +} + +// TODO remove? +var masterSecret = secret{ + name: "master-certs", + content: map[string]string{ + "masterca": "ca.crt", + "masterkey": "ca.key", + }, +} + +var esSecret = secret{ + name: "elasticsearch", + content: map[string]string{ + "elasticsearch.key": "elasticsearch.key", + "elasticsearch.crt": "elasticsearch.crt", + "logging-es.key": "logging-es.key", + "logging-es.crt": "logging-es.crt", + "admin-key": "system.admin.key", + "admin-cert": "system.admin.crt", + "admin-ca": "ca.crt", + }, +} + +var jaegerSecret = secret{ + name: "jaeger-elasticsearch", + content: map[string]string{ + "ca": "ca.crt", + }, +} + +var curatorSecret = secret{ + name: "curator", + content: map[string]string{ + "ca": "ca.crt", + "key": "system.logging.curator.key", + "cert": "system.logging.curator.crt", + }, +} + +func secretName(jaeger, secret string) string { + return fmt.Sprintf("%s-%s", jaeger, secret) +} + +func createESSecrets(jaeger *v1alpha1.Jaeger) []*v1.Secret { + return []*v1.Secret{ + // master and ES secrets use hardcoded name - e.g. 
do not use instance name in it + // the other problem for us is that sg_config.yml defines a role which depends on namespace + // we could make the "resource" configurable once ES image and es-operator-are refactored + // https://jira.coreos.com/browse/LOG-326 + createSecret(jaeger, masterSecret.name, getWorkingDirContents(masterSecret.content)), + createSecret(jaeger, esSecret.name, getWorkingDirContents(esSecret.content)), + createSecret(jaeger, secretName(jaeger.Name, jaegerSecret.name), getWorkingDirContents(jaegerSecret.content)), + createSecret(jaeger, secretName(jaeger.Name, curatorSecret.name), getWorkingDirContents(curatorSecret.content)), + } +} + +// createESCerts runs bash scripts which generates certificates +func createESCerts(script string) error { + namespace, err := k8sutil.GetWatchNamespace() + if err != nil { + return fmt.Errorf("failed to get watch namespace: %v", err) + } + // #nosec G204: Subprocess launching should be audited + cmd := exec.Command("bash", script) + cmd.Env = append(os.Environ(), + "NAMESPACE="+namespace, + ) + if out, err := cmd.CombinedOutput(); err != nil { + logrus.WithFields(logrus.Fields{ + "script": script, + "out": string(out)}). + Error("Failed to create certificates") + return fmt.Errorf("error running script %s: %v", script, err) + } + return nil +} + +func createSecret(jaeger *v1alpha1.Jaeger, secretName string, data map[string][]byte) *v1.Secret { + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: jaeger.Namespace, + OwnerReferences: []metav1.OwnerReference{asOwner(jaeger)}, + }, + Type: v1.SecretTypeOpaque, + Data: data, + } +} + +func asOwner(jaeger *v1alpha1.Jaeger) metav1.OwnerReference { + b := true + return metav1.OwnerReference{ + APIVersion: jaeger.APIVersion, + Kind: jaeger.Kind, + Name: jaeger.Name, + UID: jaeger.UID, + Controller: &b, + } +} + +func getWorkingDirContents(content map[string]string) map[string][]byte { + c := map[string][]byte{} + for secretKey, certName := range content { + c[secretKey] = getWorkingDirFileContents(certName) + } + return c +} + +func getWorkingDirFileContents(filePath string) []byte { + return getFileContents(getWorkingDirFilePath(filePath)) +} + +func getWorkingDirFilePath(toFile string) string { + return path.Join(workingDir, toFile) +} + +func getFileContents(path string) []byte { + if path == "" { + return nil + } + contents, err := ioutil.ReadFile(filepath.Clean(path)) + if err != nil { + return nil + } + return contents +} diff --git a/pkg/storage/elasticsearch_secrets_test.go b/pkg/storage/elasticsearch_secrets_test.go new file mode 100644 index 000000000..0c6cbeab4 --- /dev/null +++ b/pkg/storage/elasticsearch_secrets_test.go @@ -0,0 +1,88 @@ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/api/core/v1" + + "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" +) + +func TestCreateCerts_ErrNoNamespace(t *testing.T) { + err := createESCerts(certScript) + assert.EqualError(t, err, "failed to get watch namespace: WATCH_NAMESPACE must be set") +} + +func TestCreateCerts_ErrNoScript(t *testing.T) { + os.Setenv("WATCH_NAMESPACE", "invalid.&*)(") + defer os.Unsetenv("WATCH_NAMESPACE") + err := createESCerts("invalid") + assert.EqualError(t, err, "error running script invalid: exit status 127") +} + +func TestCreateESSecrets(t *testing.T) { + defer os.RemoveAll(workingDir) + j := v1alpha1.NewJaeger("foo") + os.Setenv("WATCH_NAMESPACE", "invalid.&*)(") + defer 
os.Unsetenv("WATCH_NAMESPACE") + fmt.Println(os.Getwd()) + err := createESCerts("../../scripts/cert_generation.sh") + assert.NoError(t, err) + sec := createESSecrets(j) + assert.Equal(t, 4, len(sec)) + assert.Equal(t, []string{ + "master-certs", + "elasticsearch", + fmt.Sprintf("%s-%s", j.Name, jaegerSecret.name), + fmt.Sprintf("%s-%s", j.Name, curatorSecret.name)}, + []string{sec[0].Name, sec[1].Name, sec[2].Name, sec[3].Name}) + for _, s := range sec { + if s.Name == fmt.Sprintf("%s-%s", j.Name, jaegerSecret.name) { + ca, err := ioutil.ReadFile(workingDir + "/ca.crt") + assert.NoError(t, err) + assert.Equal(t, map[string][]byte{"ca": ca}, s.Data) + } + } +} + +func TestCreteSecret(t *testing.T) { + defer os.RemoveAll(workingDir) + j := v1alpha1.NewJaeger("foo") + j.Namespace = "myproject" + s := createSecret(j, "bar", map[string][]byte{"foo": {}}) + assert.Equal(t, "bar", s.ObjectMeta.Name) + assert.Equal(t, j.Namespace, s.ObjectMeta.Namespace) + assert.Equal(t, j.Name, s.ObjectMeta.OwnerReferences[0].Name) + assert.Equal(t, j.Name, s.ObjectMeta.OwnerReferences[0].Name) + assert.Equal(t, map[string][]byte{"foo": {}}, s.Data) + assert.Equal(t, v1.SecretTypeOpaque, s.Type) +} + +func TestGetWorkingFileDirContent(t *testing.T) { + defer os.RemoveAll(workingDir) + err := os.MkdirAll(workingDir, os.ModePerm) + assert.NoError(t, err) + err = ioutil.WriteFile(workingDir+"/foobar", []byte("foo"), 0644) + assert.NoError(t, err) + b := getWorkingDirFileContents("foobar") + assert.Equal(t, "foo", string(b)) +} + +func TestGetWorkingFileDirContent_EmptyPath(t *testing.T) { + b := getWorkingDirFileContents("") + assert.Nil(t, b) +} + +func TestGetWorkingFileDirContent_FileDoesNotExists(t *testing.T) { + b := getWorkingDirFileContents("jungle") + assert.Nil(t, b) +} + +func TestGetFileContet_EmptyPath(t *testing.T) { + b := getFileContents("") + assert.Nil(t, b) +} diff --git a/pkg/storage/elasticsearch_test.go b/pkg/storage/elasticsearch_test.go new file mode 100644 index 000000000..c64516353 --- /dev/null +++ b/pkg/storage/elasticsearch_test.go @@ -0,0 +1,79 @@ +package storage + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" +) + +func TestShouldDeployElasticsearch(t *testing.T) { + tests := []struct { + j v1alpha1.JaegerStorageSpec + expected bool + }{ + {j: v1alpha1.JaegerStorageSpec{}}, + {j: v1alpha1.JaegerStorageSpec{Type: "cassandra"}}, + {j: v1alpha1.JaegerStorageSpec{Type: "elasticsearch", Options: v1alpha1.NewOptions(map[string]interface{}{"es.server-urls": "foo"})}}, + {j: v1alpha1.JaegerStorageSpec{Type: "elasticsearch"}, expected: true}, + } + for _, test := range tests { + assert.Equal(t, test.expected, ShouldDeployElasticsearch(test.j)) + } +} + +func TestCreateElasticsearchCR(t *testing.T) { + trueVar := true + j := v1alpha1.NewJaeger("foo") + j.Namespace = "myproject" + es := &ElasticsearchDeployment{Jaeger: j} + cr := es.createCr() + assert.Equal(t, "myproject", cr.Namespace) + assert.Equal(t, "elasticsearch", cr.Name) + assert.Equal(t, []metav1.OwnerReference{{Name: "foo", Controller: &trueVar}}, cr.OwnerReferences) +} + +func TestInject(t *testing.T) { + p := &v1.PodSpec{ + Containers: []v1.Container{{ + Args: []string{"foo"}, + VolumeMounts: []v1.VolumeMount{{Name: "lol"}}, + }}, + } + es := &ElasticsearchDeployment{Jaeger: v1alpha1.NewJaeger("hoo")} + es.InjectStorageConfiguration(p) + expVolumes := []v1.Volume{{Name: 
"certs", VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "hoo-jaeger-elasticsearch", + }, + }}} + assert.Equal(t, expVolumes, p.Volumes) + expContainers := []v1.Container{{ + Args: []string{ + "foo", + "--es.server-urls=https://elasticsearch:9200", + "--es.token-file=" + k8sTokenFile, + "--es.tls.ca=" + caPath, + }, + VolumeMounts: []v1.VolumeMount{ + {Name: "lol"}, + { + Name: volumeName, + ReadOnly: true, + MountPath: volumeMountPath, + }, + }, + }} + assert.Equal(t, expContainers, p.Containers) +} + +func TestCreateElasticsearchObjects(t *testing.T) { + j := v1alpha1.NewJaeger("foo") + es := &ElasticsearchDeployment{Jaeger: j} + objs, err := es.CreateElasticsearchObjects() + assert.Nil(t, objs) + assert.EqualError(t, err, "failed to create Elasticsearch certificates: failed to get watch namespace: WATCH_NAMESPACE must be set") +} diff --git a/pkg/strategy/all-in-one_test.go b/pkg/strategy/all-in-one_test.go index 6009f776b..9a5ec4d4c 100644 --- a/pkg/strategy/all-in-one_test.go +++ b/pkg/strategy/all-in-one_test.go @@ -81,7 +81,7 @@ func TestDelegateAllInOneDepedencies(t *testing.T) { func assertDeploymentsAndServicesForAllInOne(t *testing.T, name string, objs []runtime.Object, hasDaemonSet bool, hasOAuthProxy bool, hasConfigMap bool) { // TODO(jpkroehling): this func deserves a refactoring already - expectedNumObjs := 6 + expectedNumObjs := 7 if hasDaemonSet { expectedNumObjs++ diff --git a/pkg/strategy/controller.go b/pkg/strategy/controller.go index ee709fbe4..88fd76b57 100644 --- a/pkg/strategy/controller.go +++ b/pkg/strategy/controller.go @@ -96,6 +96,7 @@ func normalize(jaeger *v1alpha1.Jaeger) { normalizeSparkDependencies(&jaeger.Spec.Storage.SparkDependencies, jaeger.Spec.Storage.Type) normalizeIndexCleaner(&jaeger.Spec.Storage.EsIndexCleaner, jaeger.Spec.Storage.Type) + normalizeElasticsearch(&jaeger.Spec.Storage.Elasticsearch) } func normalizeSparkDependencies(spec *v1alpha1.JaegerDependenciesSpec, storage string) { @@ -129,6 +130,12 @@ func normalizeIndexCleaner(spec *v1alpha1.JaegerEsIndexCleanerSpec, storage stri } } +func normalizeElasticsearch(spec *v1alpha1.ElasticsearchSpec) { + if spec.NodeCount == 0 { + spec.NodeCount = 1 + } +} + func unknownStorage(typ string) bool { for _, k := range storage.ValidTypes() { if strings.EqualFold(typ, k) { diff --git a/pkg/strategy/controller_test.go b/pkg/strategy/controller_test.go index 3305aa67f..8434f6741 100644 --- a/pkg/strategy/controller_test.go +++ b/pkg/strategy/controller_test.go @@ -47,7 +47,7 @@ func TestNewControllerForProduction(t *testing.T) { ctrl := For(context.TODO(), jaeger) ds := ctrl.Create() - assert.Len(t, ds, 6) + assert.Len(t, ds, 7) } func TestUnknownStorage(t *testing.T) { diff --git a/pkg/strategy/production.go b/pkg/strategy/production.go index f0fa02d17..800abffd4 100644 --- a/pkg/strategy/production.go +++ b/pkg/strategy/production.go @@ -7,6 +7,7 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/viper" batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" "k8s.io/apimachinery/pkg/runtime" "github.com/jaegertracing/jaeger-operator/pkg/account" @@ -56,11 +57,11 @@ func (c *productionStrategy) Create() []runtime.Object { os = append(os, scmp) } + cDep := collector.Get() + queryDep := inject.OAuthProxy(c.jaeger, query.Get()) + // add the deployments - os = append(os, - collector.Get(), - inject.OAuthProxy(c.jaeger, query.Get()), - ) + os = append(os, cDep, queryDep) if ds := agent.Get(); nil != ds { os = append(os, ds) @@ -94,14 +95,33 @@ func (c 
*productionStrategy) Create() []runtime.Object {
 		}
 	}

+	var indexCleaner *batchv1beta1.CronJob
 	if isBoolTrue(c.jaeger.Spec.Storage.EsIndexCleaner.Enabled) {
 		if strings.EqualFold(c.jaeger.Spec.Storage.Type, "elasticsearch") {
-			os = append(os, cronjob.CreateEsIndexCleaner(c.jaeger))
+			indexCleaner = cronjob.CreateEsIndexCleaner(c.jaeger)
+			os = append(os, indexCleaner)
 		} else {
 			logrus.WithField("type", c.jaeger.Spec.Storage.Type).Warn("Skipping Elasticsearch index cleaner job due to unsupported storage.")
 		}
 	}

+	if storage.ShouldDeployElasticsearch(c.jaeger.Spec.Storage) {
+		es := &storage.ElasticsearchDeployment{
+			Jaeger: c.jaeger,
+		}
+		objs, err := es.CreateElasticsearchObjects(cDep.Spec.Template.Spec.ServiceAccountName, queryDep.Spec.Template.Spec.ServiceAccountName)
+		if err != nil {
+			logrus.WithError(err).Error("Could not create Elasticsearch objects, Elasticsearch will not be deployed")
+		} else {
+			os = append(os, objs...)
+			es.InjectStorageConfiguration(&queryDep.Spec.Template.Spec)
+			es.InjectStorageConfiguration(&cDep.Spec.Template.Spec)
+			if indexCleaner != nil {
+				es.InjectIndexCleanerConfiguration(&indexCleaner.Spec.JobTemplate.Spec.Template.Spec)
+			}
+		}
+	}
+
 	return os
 }
diff --git a/pkg/strategy/production_test.go b/pkg/strategy/production_test.go
index 1a427c4f6..324b422a7 100644
--- a/pkg/strategy/production_test.go
+++ b/pkg/strategy/production_test.go
@@ -123,7 +123,7 @@ func TestDelegateProductionDepedencies(t *testing.T) {
 }

 func assertDeploymentsAndServicesForProduction(t *testing.T, name string, objs []runtime.Object, hasDaemonSet bool, hasOAuthProxy bool, hasConfigMap bool) {
-	expectedNumObjs := 6
+	expectedNumObjs := 7

 	if hasDaemonSet {
 		expectedNumObjs++
@@ -161,7 +161,7 @@ func assertDeploymentsAndServicesForProduction(t *testing.T, name string, objs [
 		ingresses[fmt.Sprintf("%s-query", name)] = false
 	}

-	serviceAccounts := map[string]bool{}
+	serviceAccounts := map[string]bool{name: false}
 	if hasOAuthProxy {
 		serviceAccounts[fmt.Sprintf("%s-ui-proxy", name)] = false
 	}
diff --git a/pkg/strategy/streaming_test.go b/pkg/strategy/streaming_test.go
index 1554ee7e3..bb04f1dbc 100644
--- a/pkg/strategy/streaming_test.go
+++ b/pkg/strategy/streaming_test.go
@@ -136,7 +136,7 @@ func TestDelegateStreamingDepedencies(t *testing.T) {
 }

 func assertDeploymentsAndServicesForStreaming(t *testing.T, name string, objs []runtime.Object, hasDaemonSet bool, hasOAuthProxy bool, hasConfigMap bool) {
-	expectedNumObjs := 6
+	expectedNumObjs := 7

 	if hasDaemonSet {
 		expectedNumObjs++
diff --git a/scripts/cert_generation.sh b/scripts/cert_generation.sh
new file mode 100644
index 000000000..0a60bb08d
--- /dev/null
+++ b/scripts/cert_generation.sh
@@ -0,0 +1,264 @@
+#! /bin/bash
+
+# The script is taken from https://github.com/openshift/cluster-logging-operator
+
+WORKING_DIR=${WORKING_DIR:-/tmp/_working_dir}
+NAMESPACE=${NAMESPACE:-openshift-logging}
+CA_PATH=${CA_PATH:-$WORKING_DIR/ca.crt}
+
+REGENERATE_NEEDED=0
+
+function init_cert_files() {
+
+  if [ ! -f ${WORKING_DIR}/ca.db ]; then
+    touch ${WORKING_DIR}/ca.db
+  fi
+
+  if [ ! -f ${WORKING_DIR}/ca.serial.txt ]; then
+    echo 00 > ${WORKING_DIR}/ca.serial.txt
+  fi
+}
+
+function generate_signing_ca() {
+  if [ ! -f ${WORKING_DIR}/ca.crt ] || [ ! -f ${WORKING_DIR}/ca.key ] || ! 
openssl x509 -checkend 0 -noout -in ${WORKING_DIR}/ca.crt; then + openssl req -x509 \ + -new \ + -newkey rsa:2048 \ + -keyout ${WORKING_DIR}/ca.key \ + -nodes \ + -days 1825 \ + -out ${WORKING_DIR}/ca.crt \ + -subj "/CN=openshift-cluster-logging-signer" + + REGENERATE_NEEDED=1 + fi +} + +function create_signing_conf() { + cat < "${WORKING_DIR}/signing.conf" +# Simple Signing CA + +# The [default] section contains global constants that can be referred to from +# the entire configuration file. It may also hold settings pertaining to more +# than one openssl command. + +[ default ] +dir = ${WORKING_DIR} # Top dir + +# The next part of the configuration file is used by the openssl req command. +# It defines the CA's key pair, its DN, and the desired extensions for the CA +# certificate. + +[ req ] +default_bits = 2048 # RSA key size +encrypt_key = yes # Protect private key +default_md = sha1 # MD to use +utf8 = yes # Input is UTF-8 +string_mask = utf8only # Emit UTF-8 strings +prompt = no # Don't prompt for DN +distinguished_name = ca_dn # DN section +req_extensions = ca_reqext # Desired extensions + +[ ca_dn ] +0.domainComponent = "io" +1.domainComponent = "openshift" +organizationName = "OpenShift Origin" +organizationalUnitName = "Logging Signing CA" +commonName = "Logging Signing CA" + +[ ca_reqext ] +keyUsage = critical,keyCertSign,cRLSign +basicConstraints = critical,CA:true,pathlen:0 +subjectKeyIdentifier = hash + +# The remainder of the configuration file is used by the openssl ca command. +# The CA section defines the locations of CA assets, as well as the policies +# applying to the CA. + +[ ca ] +default_ca = signing_ca # The default CA section + +[ signing_ca ] +certificate = \$dir/ca.crt # The CA cert +private_key = \$dir/ca.key # CA private key +new_certs_dir = \$dir/ # Certificate archive +serial = \$dir/ca.serial.txt # Serial number file +crlnumber = \$dir/ca.crl.srl # CRL number file +database = \$dir/ca.db # Index file +unique_subject = no # Require unique subject +default_days = 730 # How long to certify for +default_md = sha1 # MD to use +policy = any_pol # Default naming policy +email_in_dn = no # Add email to cert DN +preserve = no # Keep passed DN ordering +name_opt = ca_default # Subject DN display options +cert_opt = ca_default # Certificate display options +copy_extensions = copy # Copy extensions from CSR +x509_extensions = client_ext # Default cert extensions +default_crl_days = 7 # How long before next CRL +crl_extensions = crl_ext # CRL extensions + +# Naming policies control which parts of a DN end up in the certificate and +# under what circumstances certification should be denied. + +[ match_pol ] +domainComponent = match # Must match 'simple.org' +organizationName = match # Must match 'Simple Inc' +organizationalUnitName = optional # Included if present +commonName = supplied # Must be present + +[ any_pol ] +domainComponent = optional +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = optional +emailAddress = optional + +# Certificate extensions define what types of certificates the CA is able to +# create. 
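+# The [ client_ext ] section is the x509_extensions default configured above;
+# sign_cert selects [ server_ext ] explicitly via "-extensions server_ext".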
+
+[ client_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+[ server_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = serverAuth,clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+# CRL extensions exist solely to point to the CA certificate that has issued
+# the CRL.
+
+[ crl_ext ]
+authorityKeyIdentifier = keyid
+EOF
+}
+
+function sign_cert() {
+  local component=$1
+
+  openssl ca \
+          -in ${WORKING_DIR}/${component}.csr \
+          -notext \
+          -out ${WORKING_DIR}/${component}.crt \
+          -config ${WORKING_DIR}/signing.conf \
+          -extensions v3_req \
+          -batch \
+          -extensions server_ext
+}
+
+function generate_cert_config() {
+  local component=$1
+  local extensions=${2:-}
+
+  if [ "$extensions" != "" ]; then
+    cat <<EOF > "${WORKING_DIR}/${component}.conf"
+[ req ]
+default_bits = 2048
+prompt = no
+encrypt_key = yes
+default_md = sha1
+distinguished_name = dn
+req_extensions = req_ext
+[ dn ]
+CN = ${component}
+OU = OpenShift
+O = Logging
+[ req_ext ]
+subjectAltName = ${extensions}
+EOF
+  else
+    cat <<EOF > "${WORKING_DIR}/${component}.conf"
+[ req ]
+default_bits = 2048
+prompt = no
+encrypt_key = yes
+default_md = sha1
+distinguished_name = dn
+[ dn ]
+CN = ${component}
+OU = OpenShift
+O = Logging
+EOF
+  fi
+}
+
+function generate_request() {
+  local component=$1
+
+  openssl req -new \
+              -out ${WORKING_DIR}/${component}.csr \
+              -newkey rsa:2048 \
+              -keyout ${WORKING_DIR}/${component}.key \
+              -config ${WORKING_DIR}/${component}.conf \
+              -days 712 \
+              -nodes
+}
+
+function generate_certs() {
+  local component=$1
+  local extensions=${2:-}
+
+  if [ $REGENERATE_NEEDED = 1 ] || [ ! -f ${WORKING_DIR}/${component}.crt ] || ! openssl x509 -checkend 0 -noout -in ${WORKING_DIR}/${component}.crt; then
+    generate_cert_config $component $extensions
+    generate_request $component
+    sign_cert $component
+  fi
+}
+
+function generate_extensions() {
+  local add_oid=$1
+  local add_localhost=$2
+  shift
+  shift
+  local cert_names=$@
+
+  extension_names=""
+  extension_index=1
+  local use_comma=0
+
+  if [ "$add_localhost" == "true" ]; then
+    extension_names="IP.1:127.0.0.1,DNS.1:localhost"
+    extension_index=2
+    use_comma=1
+  fi
+
+  for name in ${cert_names//,/ }; do
+    if [ $use_comma = 1 ]; then
+      extension_names="${extension_names},DNS.${extension_index}:${name}"
+    else
+      extension_names="DNS.${extension_index}:${name}"
+      use_comma=1
+    fi
+    extension_index=$(( extension_index + 1 ))
+  done
+
+  if [ "$add_oid" == "true" ]; then
+    extension_names="${extension_names},RID.1:1.2.3.4.5.5"
+  fi
+
+  echo "$extension_names"
+}
+
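+# Main flow: make sure the working directory exists, bootstrap the signing CA
+# and its bookkeeping files, render signing.conf, and then issue the individual
+# component certificates (with SANs for the Elasticsearch services).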
+if [ ! -d $WORKING_DIR ]; then
+  mkdir -p $WORKING_DIR
+fi
+
+generate_signing_ca
+init_cert_files
+create_signing_conf
+
+generate_certs 'system.admin'
+generate_certs 'system.logging.curator'
+
+# TODO: get es SAN DNS, IP values from es service names
+generate_certs 'elasticsearch' "$(generate_extensions true true elasticsearch elasticsearch-infra elasticsearch-apps)"
+generate_certs 'logging-es' "$(generate_extensions false true {elasticsearch,elasticsearch-infra,elasticsearch-apps}{,-cluster}{,.${NAMESPACE}.svc.cluster.local})"
diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go
index 1717ea105..d5ec7b900 100644
--- a/vendor/github.com/go-openapi/spec/bindata.go
+++ b/vendor/github.com/go-openapi/spec/bindata.go
@@ -1,14 +1,14 @@
-// Code generated by go-bindata.
+// Code generated by go-bindata. DO NOT EDIT.
 // sources:
-// schemas/jsonschema-draft-04.json
-// schemas/v2/schema.json
-// DO NOT EDIT!
+// schemas/jsonschema-draft-04.json (4.357kB)
+// schemas/v2/schema.json (40.249kB)
 
 package spec
 
 import (
 	"bytes"
 	"compress/gzip"
+	"crypto/sha256"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -39,8 +39,9 @@ func bindataRead(data []byte, name string) ([]byte, error) {
 }
 
 type asset struct {
-	bytes []byte
-	info  os.FileInfo
+	bytes  []byte
+	info   os.FileInfo
+	digest [sha256.Size]byte
 }
 
 type bindataFileInfo struct {
@@ -84,8 +85,8 @@ func jsonschemaDraft04JSON() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(420), modTime: time.Unix(1523760398, 0)}
-	a := &asset{bytes: bytes, info: info}
+	info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}}
 	return a, nil
 }
 
@@ -104,8 +105,8 @@ func v2SchemaJSON() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1523760397, 0)}
-	a := &asset{bytes: bytes, info: info}
+	info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}}
 	return a, nil
 }
 
@@ -113,8 +114,8 @@ func v2SchemaJSON() (*asset, error) {
 // It returns an error if the asset could not be found or
 // could not be loaded.
 func Asset(name string) ([]byte, error) {
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[cannonicalName]; ok {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
 		a, err := f()
 		if err != nil {
 			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
@@ -124,6 +125,12 @@ func Asset(name string) ([]byte, error) {
 	return nil, fmt.Errorf("Asset %s not found", name)
 }
 
+// AssetString returns the asset contents as a string (instead of a []byte).
+func AssetString(name string) (string, error) {
+	data, err := Asset(name)
+	return string(data), err
+}
+
 // MustAsset is like Asset but panics when Asset would return an error.
 // It simplifies safe initialization of global variables.
func MustAsset(name string) []byte { @@ -135,12 +142,18 @@ func MustAsset(name string) []byte { return a } +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + // AssetInfo loads and returns the asset info for the given name. // It returns an error if the asset could not be found or // could not be loaded. func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) @@ -150,6 +163,33 @@ func AssetInfo(name string) (os.FileInfo, error) { return nil, fmt.Errorf("AssetInfo %s not found", name) } +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + // AssetNames returns the names of the assets. func AssetNames() []string { names := make([]string, 0, len(_bindata)) @@ -162,7 +202,8 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, + + "v2/schema.json": v2SchemaJSON, } // AssetDir returns the file names below a certain @@ -174,15 +215,15 @@ var _bindata = map[string]func() (*asset, error){ // img/ // a.png // b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and // AssetDir("") will return []string{"data"}. func AssetDir(name string) ([]string, error) { node := _bintree if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") for _, p := range pathList { node = node.Children[p] if node == nil { @@ -212,7 +253,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ }}, }} -// RestoreAsset restores an asset under the given directory +// RestoreAsset restores an asset under the given directory. 
func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { @@ -230,14 +271,10 @@ func RestoreAsset(dir, name string) error { if err != nil { return err } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) } -// RestoreAssets restores an asset under the given directory recursively +// RestoreAssets restores an asset under the given directory recursively. func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File @@ -255,6 +292,6 @@ func RestoreAssets(dir, name string) error { } func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) } diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 000000000..3fada0dae --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,60 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "sync" + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + debugLog("getting %q from resolution cache", uri) + s.lock.RLock() + v, ok := s.store[uri] + debugLog("got %q from resolution cache: %t", uri, ok) + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var resCache ResolutionCache + +func init() { + resCache = initResolutionCache() +} + +// initResolutionCache initializes the URI resolution cache +func initResolutionCache() ResolutionCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go index 7edb95a61..389c528ff 100644 --- a/vendor/github.com/go-openapi/spec/debug.go +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -24,9 +24,9 @@ import ( var ( // Debug is true when the SWAGGER_DEBUG env var is not empty. - // It enables a more verbose logging of validators. + // It enables a more verbose logging of this package. 
Debug = os.Getenv("SWAGGER_DEBUG") != "" - // validateLogger is a debug logger for this package + // specLogger is a debug logger for this package specLogger *log.Logger ) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index 456a9dd7e..1e7fc8c49 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -17,20 +17,10 @@ package spec import ( "encoding/json" "fmt" - "log" - "net/url" - "os" - "path" - "path/filepath" - "reflect" "strings" - "sync" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" ) -// ExpandOptions provides options for expand. +// ExpandOptions provides options for spec expand type ExpandOptions struct { RelativeBase string SkipSchemas bool @@ -38,68 +28,6 @@ type ExpandOptions struct { AbsoluteCircularRef bool } -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -var resCache ResolutionCache - -func init() { - resCache = initResolutionCache() -} - -// initResolutionCache initializes the URI resolution cache -func initResolutionCache() ResolutionCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, which allows shortcuts. - // NOTE: this is not just a performance improvement: it is required to figure out - // circular references which participate several cycles. - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. - circulars map[string]bool - basePath string -} - -func newResolverContext(originalBasePath string) *resolverContext { - return &resolverContext{ - circulars: make(map[string]bool), - basePath: originalBasePath, // keep the root base path in context - } -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - debugLog("getting %q from resolution cache", uri) - s.lock.RLock() - v, ok := s.store[uri] - debugLog("got %q from resolution cache: %t", uri, ok) - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - // ResolveRefWithBase resolves a reference against a context root with preservation of base path func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { resolver, err := defaultSchemaLoader(root, opts, nil, nil) @@ -179,7 +107,10 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R return result, nil } -// ResolveItems resolves header and parameter items reference against a context root and base path +// ResolveItems resolves parameter items reference against a context root and base path. +// +// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. +// Similarly, $ref are forbidden in response headers. 
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { @@ -213,341 +144,11 @@ func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, return result, nil } -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext - loadDoc func(string) (json.RawMessage, error) -} - -var idPtr, _ = jsonpointer.New("/id") -var refPtr, _ = jsonpointer.New("/$ref") - -// PathLoader function to use when loading remote refs -var PathLoader func(string) (json.RawMessage, error) - -func init() { - PathLoader = func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - } -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) (*schemaLoader, error) { - - if cache == nil { - cache = resCache - } - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - absBase, _ := absPath(expandOptions.RelativeBase) - if context == nil { - context = newResolverContext(absBase) - } - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - loadDoc: func(path string) (json.RawMessage, error) { - debugLog("fetching document at %q", path) - return PathLoader(path) - }, - }, nil -} - -func idFromNode(node interface{}) (*Ref, error) { - if idValue, _, err := idPtr.Get(node); err == nil { - if refStr, ok := idValue.(string); ok && refStr != "" { - idRef, err := NewRef(refStr) - if err != nil { - return nil, err - } - return &idRef, nil - } - } - return nil, nil -} - -func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { - if startingRef == nil { - return nil - } - - if ptr == nil { - return startingRef - } - - ret := startingRef - var idRef *Ref - node := startingNode - - for _, tok := range ptr.DecodedTokens() { - node, _, _ = jsonpointer.GetForToken(node, tok) - if node == nil { - break - } - - idRef, _ = idFromNode(node) - if idRef != nil { - nw, err := ret.Inherits(*idRef) - if err != nil { - break - } - ret = nw - } - - refRef, _, _ := refPtr.Get(node) - if refRef != nil { - var rf Ref - switch value := refRef.(type) { - case string: - rf, _ = NewRef(value) - } - nw, err := ret.Inherits(rf) - if err != nil { - break - } - nwURL := nw.GetURL() - if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { - nwpt := filepath.ToSlash(nwURL.Path) - if filepath.IsAbs(nwpt) { - _, err := os.Stat(nwpt) - if err != nil { - nwURL.Path = filepath.Join(".", nwpt) - } - } - } - - ret = nw - } - - } - - return ret -} - -// normalize absolute path for cache. 
-// on Windows, drive letters should be converted to lower as scheme in net/url.URL -func normalizeAbsPath(path string) string { - u, err := url.Parse(path) - if err != nil { - debugLog("normalize absolute path failed: %s", err) - return path - } - return u.String() -} - -// base or refPath could be a file path or a URL -// given a base absolute path and a ref path, return the absolute path of refPath -// 1) if refPath is absolute, return it -// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists -// base could be a directory or a full file path -func normalizePaths(refPath, base string) string { - refURL, _ := url.Parse(refPath) - if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { - // refPath is actually absolute - if refURL.Host != "" { - return refPath - } - parts := strings.Split(refPath, "#") - result := filepath.FromSlash(parts[0]) - if len(parts) == 2 { - result += "#" + parts[1] - } - return result - } - - // relative refPath - baseURL, _ := url.Parse(base) - if !strings.HasPrefix(refPath, "#") { - // combining paths - if baseURL.Host != "" { - baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) - } else { // base is a file - newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) - return newBase - } - - } - // copying fragment from ref to base - baseURL.Fragment = refURL.Fragment - return baseURL.String() -} - -// denormalizePaths returns to simplest notation on file $ref, -// i.e. strips the absolute path and sets a path relative to the base path. -// -// This is currently used when we rewrite ref after a circular ref has been detected -func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { - debugLog("denormalizeFileRef for: %s", ref.String()) - - if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { - return ref - } - // strip relativeBase from URI - relativeBaseURL, _ := url.Parse(relativeBase) - relativeBaseURL.Fragment = "" - - if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { - // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix - r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) - return &r - } - - if relativeBaseURL.IsAbs() { - // other absolute URL get unchanged (i.e. with a non-empty scheme) - return ref - } - - // for relative file URIs: - originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) - originalRelativeBaseURL.Fragment = "" - if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { - // the resulting ref is in the expanded spec: return a local ref - r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) - return &r - } - - // check if we may set a relative path, considering the original base path for this spec. - // Example: - // spec is located at /mypath/spec.json - // my normalized ref points to: /mypath/item.json#/target - // expected result: item.json#/target - parts := strings.Split(ref.String(), "#") - relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) - if err != nil { - // there is no common ancestor (e.g. 
different drives on windows) - // leaves the ref unchanged - return ref - } - if len(parts) == 2 { - relativePath += "#" + parts[1] - } - r, _ := NewRef(relativePath) - return &r -} - -// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL -func normalizeFileRef(ref *Ref, relativeBase string) *Ref { - // This is important for when the reference is pointing to the root schema - if ref.String() == "" { - r, _ := NewRef(relativeBase) - return &r - } - - debugLog("normalizing %s against %s", ref.String(), relativeBase) - - s := normalizePaths(ref.String(), relativeBase) - r, _ := NewRef(s) - return &r -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return fmt.Errorf("resolve ref: target needs to be a pointer") - } - - refURL := ref.GetURL() - if refURL == nil { - return nil - } - - var res interface{} - var data interface{} - var err error - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeFileRef(ref, basePath) - debugLog("current ref is: %s", ref.String()) - debugLog("current ref normalized file: %s", baseRef.String()) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err - } - - return nil -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - normalized := normalizeAbsPath(toFetch.String()) - - data, fromCache := r.cache.Get(normalized) - if !fromCache { - b, err := r.loadDoc(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - if err := json.Unmarshal(b, &data); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, data) - } - - return data, toFetch, fromCache, nil -} - -// Resolve resolves a reference against basePath and stores the result in target -// Resolve is not in charge of following references, it only resolves ref by following its URL -// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them -// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -// absPath returns the absolute path of a file -func absPath(fname string) (string, error) { - if strings.HasPrefix(fname, "http") { - return fname, nil - } - if filepath.IsAbs(fname) { - return fname, nil - } - wd, err := os.Getwd() - return filepath.Join(wd, fname), err -} - // ExpandSpec expands the references in a swagger spec func ExpandSpec(spec *Swagger, options *ExpandOptions) error { resolver, err := defaultSchemaLoader(spec, options, nil, nil) // Just in case this ever returns an error. 
- if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return err } @@ -561,7 +162,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { for key, definition := range spec.Definitions { var def *Schema var err error - if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } if def != nil { @@ -570,23 +171,26 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { } } - for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } spec.Parameters[key] = parameter } - for key, response := range spec.Responses { - if err := expandResponse(&response, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } spec.Responses[key] = response } if spec.Paths != nil { - for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + for key := range spec.Paths.Paths { + path := spec.Paths.Paths[key] + if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } spec.Paths.Paths[key] = path @@ -596,18 +200,6 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { return nil } -func shouldStopOnError(err error, opts *ExpandOptions) bool { - if err != nil && !opts.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - // baseForRoot loads in the cache the root document and produces a fake "root" base path entry // for further $ref resolution func baseForRoot(root interface{}, cache ResolutionCache) string { @@ -686,52 +278,6 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, bas return &target, nil } -// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID -func basePathFromSchemaID(oldBasePath, id string) string { - u, err := url.Parse(oldBasePath) - if err != nil { - panic(err) - } - uid, err := url.Parse(id) - if err != nil { - panic(err) - } - - if path.IsAbs(uid.Path) { - return id - } - u.Path = path.Join(path.Dir(u.Path), uid.Path) - return u.String() -} - -// isCircular detects cycles in sequences of $ref. -// It relies on a private context (which needs not be locked). 
-func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizePaths(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -func updateBasePath(transitive *schemaLoader, resolver *schemaLoader, basePath string) string { - if transitive != resolver { - debugLog("got a new resolver") - if transitive.options != nil && transitive.options.RelativeBase != "" { - basePath, _ = absPath(transitive.options.RelativeBase) - debugLog("new basePath = %s", basePath) - } - } - - return basePath -} - func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { if target.Ref.String() == "" && target.Ref.IsRoot() { // normalizing is important @@ -741,8 +287,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } - /* change the base path of resolution when an ID is encountered - otherwise the basePath should inherit the parent's */ + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's // important: ID can be relative path if target.ID != "" { debugLog("schema has ID: %s", target.ID) @@ -756,12 +302,11 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba basePath = normalizePaths(refPath, basePath) } - /* Explain here what this function does */ var t *Schema - /* if Ref is found, everything else doesn't matter */ - /* Ref also changes the resolution scope of children expandSchema */ + // if Ref is found, everything else doesn't matter + // Ref also changes the resolution scope of children expandSchema if target.Ref.String() != "" { - /* Here the resolution scope is changed because a $ref was encountered */ + // here the resolution scope is changed because a $ref was encountered normalizedRef := normalizeFileRef(&target.Ref, basePath) normalizedBasePath := normalizedRef.RemoteURI() @@ -779,31 +324,27 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba return &target, nil } - debugLog("basePath: %s", basePath) - if Debug { - b, _ := json.Marshal(target) - debugLog("calling Resolve with target: %s", string(b)) - } - if err := resolver.Resolve(&target.Ref, &t, basePath); shouldStopOnError(err, resolver.options) { + debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target) + if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) { return nil, err } if t != nil { parentRefs = append(parentRefs, normalizedRef.String()) var err error - transitiveResolver, err := transitiveResolver(basePath, target.Ref, resolver) - if shouldStopOnError(err, resolver.options) { + transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref) + if transitiveResolver.shouldStopOnError(err) { return nil, err } - basePath = updateBasePath(transitiveResolver, resolver, normalizedBasePath) + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) return expandSchema(*t, parentRefs, transitiveResolver, basePath) } } t, err := expandItems(target, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -812,21 
+353,21 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba for i := range target.AllOf { t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } target.AllOf[i] = *t } for i := range target.AnyOf { t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } target.AnyOf[i] = *t } for i := range target.OneOf { t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -835,7 +376,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.Not != nil { t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -844,7 +385,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } for k := range target.Properties { t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -853,7 +394,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -862,7 +403,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } for k := range target.PatternProperties { t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -872,7 +413,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba for k := range target.Dependencies { if target.Dependencies[k].Schema != nil { t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -882,7 +423,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -891,7 +432,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } for k := range target.Definitions { t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -901,75 +442,42 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba return &target, nil } -func derefPathItem(pathItem *PathItem, parentRefs []string, resolver *schemaLoader, basePath string) error { - 
curRef := pathItem.Ref.String() - if curRef != "" { - normalizedRef := normalizeFileRef(&pathItem.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - return nil - } - - if err := resolver.Resolve(&pathItem.Ref, pathItem, basePath); shouldStopOnError(err, resolver.options) { - return err - } - - if pathItem.Ref.String() != "" && pathItem.Ref.String() != curRef && basePath != normalizedBasePath { - parentRefs = append(parentRefs, normalizedRef.String()) - return derefPathItem(pathItem, parentRefs, resolver, normalizedBasePath) - } - } - - return nil -} - func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { if pathItem == nil { return nil } parentRefs := []string{} - if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { return err } if pathItem.Ref.String() != "" { var err error - resolver, err = transitiveResolver(basePath, pathItem.Ref, resolver) - if shouldStopOnError(err, resolver.options) { + resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref) + if resolver.shouldStopOnError(err) { return err } } pathItem.Ref = Ref{} - // Currently unused: - //parentRefs = parentRefs[0:] - for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) { return err } } - if err := expandOperation(pathItem.Get, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, } - if err := expandOperation(pathItem.Head, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Options, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Put, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Post, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Patch, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Delete, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } } return nil } @@ -979,8 +487,9 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } - for i, param := range op.Parameters { - if err := expandParameter(¶m, resolver, basePath); shouldStopOnError(err, resolver.options) { + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { return err } op.Parameters[i] = param @@ -988,11 +497,12 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err if op.Responses != nil { responses := op.Responses - if err := expandResponse(responses.Default, resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := 
expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { return err } - for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver, basePath); shouldStopOnError(err, resolver.options) { + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { return err } responses.StatusCodeResponses[code] = response @@ -1001,34 +511,6 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } -func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*schemaLoader, error) { - if ref.IsRoot() || ref.HasFragmentOnly { - return resolver, nil - } - - baseRef, _ := NewRef(basePath) - currentRef := normalizeFileRef(&ref, basePath) - // Set a new root to resolve against - if !strings.HasPrefix(currentRef.String(), baseRef.String()) { - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := resolver.cache.Get(rootURL.String()) - var err error - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := resolver.options - newOptions.RelativeBase = rootURL.String() - debugLog("setting new root: %s", newOptions.RelativeBase) - resolver, err = defaultSchemaLoader(root, newOptions, resolver.cache, resolver.context) - if err != nil { - return nil, err - } - } - - return resolver, nil -} - // ExpandResponseWithRoot expands a response based on a root document, not a fetchable document func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { opts := &ExpandOptions{ @@ -1043,7 +525,7 @@ func ExpandResponseWithRoot(response *Response, root interface{}, cache Resoluti return err } - return expandResponse(response, resolver, opts.RelativeBase) + return expandParameterOrResponse(response, resolver, opts.RelativeBase) } // ExpandResponse expands a response based on a basepath @@ -1062,70 +544,7 @@ func ExpandResponse(response *Response, basePath string) error { return err } - return expandResponse(response, resolver, opts.RelativeBase) -} - -func derefResponse(response *Response, parentRefs []string, resolver *schemaLoader, basePath string) error { - curRef := response.Ref.String() - if curRef != "" { - /* Here the resolution scope is changed because a $ref was encountered */ - normalizedRef := normalizeFileRef(&response.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := resolver.Resolve(&response.Ref, response, basePath); shouldStopOnError(err, resolver.options) { - return err - } - - if response.Ref.String() != "" && response.Ref.String() != curRef && basePath != normalizedBasePath { - parentRefs = append(parentRefs, normalizedRef.String()) - return derefResponse(response, parentRefs, resolver, normalizedBasePath) - } - } - - return nil -} - -func expandResponse(response *Response, resolver *schemaLoader, basePath string) error { - if response == nil { - return nil - } - parentRefs := []string{} - if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if response.Ref.String() != "" { - transitiveResolver, err := transitiveResolver(basePath, response.Ref, resolver) - if shouldStopOnError(err, transitiveResolver.options) { - return err - } - basePath = updateBasePath(transitiveResolver, resolver, basePath) - resolver = transitiveResolver - } - if response.Schema != nil && response.Schema.Ref.String() != "" { - // schema expanded to a $ref in another root - var ern error - response.Schema.Ref, ern = NewRef(normalizePaths(response.Schema.Ref.String(), response.Ref.RemoteURI())) - if ern != nil { - return ern - } - } - response.Ref = Ref{} - - parentRefs = parentRefs[0:] - if !resolver.options.SkipSchemas && response.Schema != nil { - // parentRefs = append(parentRefs, response.Schema.Ref.String()) - s, err := expandSchema(*response.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { - return err - } - *response.Schema = *s - } - - return nil + return expandParameterOrResponse(response, resolver, opts.RelativeBase) } // ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document @@ -1142,10 +561,10 @@ func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache Resol return err } - return expandParameter(parameter, resolver, opts.RelativeBase) + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) } -// ExpandParameter expands a parameter based on a basepath +// ExpandParameter expands a parameter based on a basepath. // This is the exported version of expandParameter // all refs inside parameter will be resolved relative to basePath func ExpandParameter(parameter *Parameter, basePath string) error { @@ -1161,67 +580,71 @@ func ExpandParameter(parameter *Parameter, basePath string) error { return err } - return expandParameter(parameter, resolver, opts.RelativeBase) + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) } -func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaLoader, basePath string) error { - curRef := parameter.Ref.String() - if curRef != "" { - normalizedRef := normalizeFileRef(¶meter.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := resolver.Resolve(¶meter.Ref, parameter, basePath); shouldStopOnError(err, resolver.options) { - return err +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ref *Ref + var sch *Schema + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil } - - if parameter.Ref.String() != "" && parameter.Ref.String() != curRef && basePath != normalizedBasePath { - parentRefs = append(parentRefs, normalizedRef.String()) - return derefParameter(parameter, parentRefs, resolver, normalizedBasePath) + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("expand: unsupported type %T. Input should be of type *Parameter or *Response", input) } - - return nil + return ref, sch, nil } -func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath string) error { - if parameter == nil { +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + if ref == nil { return nil } - parentRefs := []string{} - if err := derefParameter(parameter, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { return err } - if parameter.Ref.String() != "" { - transitiveResolver, err := transitiveResolver(basePath, parameter.Ref, resolver) - if shouldStopOnError(err, transitiveResolver.options) { + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) + if transitiveResolver.shouldStopOnError(err) { return err } - basePath = updateBasePath(transitiveResolver, resolver, basePath) + basePath = resolver.updateBasePath(transitiveResolver, basePath) resolver = transitiveResolver } - if parameter.Schema != nil && parameter.Schema.Ref.String() != "" { + if sch != nil && sch.Ref.String() != "" { // schema expanded to a $ref in another root var ern error - parameter.Schema.Ref, ern = NewRef(normalizePaths(parameter.Schema.Ref.String(), parameter.Ref.RemoteURI())) + sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) if ern != nil { return ern } } - parameter.Ref = Ref{} + if ref != nil { + *ref = Ref{} + } - parentRefs = parentRefs[0:] - if !resolver.options.SkipSchemas && parameter.Schema != nil { - s, err := expandSchema(*parameter.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if !resolver.options.SkipSchemas && sch != nil { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { return err } - *parameter.Schema = *s + *sch = *s } return nil } diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index 82f77f770..39efe452b 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ b/vendor/github.com/go-openapi/spec/header.go @@ -22,6 +22,10 @@ import ( "github.com/go-openapi/swag" ) +const ( + jsonArray = "array" +) + // HeaderProps describes a response header type HeaderProps struct { Description string `json:"description,omitempty"` @@ -57,7 +61,7 @@ func (h *Header) Typed(tpe, format string) *Header { // CollectionOf a fluent builder method for an array item func (h *Header) CollectionOf(items *Items, format string) *Header { 
- h.Type = "array" + h.Type = jsonArray h.Items = items h.CollectionFormat = format return h diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index cfb37ec12..c458b49b2 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -161,8 +161,5 @@ func (i *Info) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err } - if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &i.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index cf4298971..78389317e 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -22,6 +22,10 @@ import ( "github.com/go-openapi/swag" ) +const ( + jsonRef = "$ref" +) + // SimpleSchema describe swagger simple schemas for parameters and headers type SimpleSchema struct { Type string `json:"type,omitempty"` @@ -89,7 +93,7 @@ func (i *Items) Typed(tpe, format string) *Items { // CollectionOf a fluent builder method for an array item func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = "array" + i.Type = jsonArray i.Items = items i.CollectionFormat = format return i @@ -217,7 +221,7 @@ func (i Items) MarshalJSON() ([]byte, error) { // JSONLookup look up a value by the json property name func (i Items) JSONLookup(token string) (interface{}, error) { - if token == "$ref" { + if token == jsonRef { return &i.Ref, nil } diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 000000000..b8957e7c0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// normalize absolute path for cache. 
+// on Windows, drive letters should be converted to lower as scheme in net/url.URL +func normalizeAbsPath(path string) string { + u, err := url.Parse(path) + if err != nil { + debugLog("normalize absolute path failed: %s", err) + return path + } + return u.String() +} + +// base or refPath could be a file path or a URL +// given a base absolute path and a ref path, return the absolute path of refPath +// 1) if refPath is absolute, return it +// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists +// base could be a directory or a full file path +func normalizePaths(refPath, base string) string { + refURL, _ := url.Parse(refPath) + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { + // refPath is actually absolute + if refURL.Host != "" { + return refPath + } + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result + } + + // relative refPath + baseURL, _ := url.Parse(base) + if !strings.HasPrefix(refPath, "#") { + // combining paths + if baseURL.Host != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } else { // base is a file + newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) + return newBase + } + + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + return baseURL.String() +} + +// denormalizePaths returns to simplest notation on file $ref, +// i.e. strips the absolute path and sets a path relative to the base path. +// +// This is currently used when we rewrite ref after a circular ref has been detected +func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { + debugLog("denormalizeFileRef for: %s", ref.String()) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + return ref + } + // strip relativeBase from URI + relativeBaseURL, _ := url.Parse(relativeBase) + relativeBaseURL.Fragment = "" + + if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { + // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix + r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) + return &r + } + + if relativeBaseURL.IsAbs() { + // other absolute URL get unchanged (i.e. with a non-empty scheme) + return ref + } + + // for relative file URIs: + originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL.Fragment = "" + if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { + // the resulting ref is in the expanded spec: return a local ref + r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) + return &r + } + + // check if we may set a relative path, considering the original base path for this spec. + // Example: + // spec is located at /mypath/spec.json + // my normalized ref points to: /mypath/item.json#/target + // expected result: item.json#/target + parts := strings.Split(ref.String(), "#") + relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) + if err != nil { + // there is no common ancestor (e.g. 
different drives on windows) + // leaves the ref unchanged + return ref + } + if len(parts) == 2 { + relativePath += "#" + parts[1] + } + r, _ := NewRef(relativePath) + return &r +} + +// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + // This is important for when the reference is pointing to the root schema + if ref.String() == "" { + r, _ := NewRef(relativeBase) + return &r + } + + debugLog("normalizing %s against %s", ref.String(), relativeBase) + + s := normalizePaths(ref.String(), relativeBase) + r, _ := NewRef(s) + return &r +} + +// absPath returns the absolute path of a file +func absPath(fname string) (string, error) { + if strings.HasPrefix(fname, "http") { + return fname, nil + } + if filepath.IsAbs(fname) { + return fname, nil + } + wd, err := os.Getwd() + return filepath.Join(wd, fname), err +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 32f7d8fe7..b1ebd5994 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -15,24 +15,37 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" + "sort" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) +func init() { + //gob.Register(map[string][]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + // OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function type OperationProps struct { Description string `json:"description,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Schemes []string `json:"schemes,omitempty"` Tags []string `json:"tags,omitempty"` Summary string `json:"summary,omitempty"` ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` ID string `json:"operationId,omitempty"` Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` //Special case, see MarshalJSON function + Security []map[string][]string `json:"security,omitempty"` Parameters []Parameter `json:"parameters,omitempty"` Responses *Responses `json:"responses,omitempty"` } @@ -76,11 +89,17 @@ func (o *Operation) SuccessResponse() (*Response, int, bool) { return nil, 0, false } - for k, v := range o.Responses.StatusCodeResponses { - if k/100 == 2 { - return &v, k, true + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) } } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } return o.Responses.Default, 0, false } @@ -99,10 +118,7 @@ func (o *Operation) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } - if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &o.VendorExtensible) } // MarshalJSON converts this items object to JSON @@ -216,7 +232,7 @@ func (o *Operation) AddParam(param *Parameter) *Operation { // RemoveParam removes a parameter from the 
operation func (o *Operation) RemoveParam(name, in string) *Operation { for i, p := range o.Parameters { - if p.Name == name && p.In == name { + if p.Name == name && p.In == in { o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) return o } @@ -257,3 +273,126 @@ func (o *Operation) RespondsWith(code int, response *Response) *Operation { o.Responses.StatusCodeResponses[code] = *response return o } + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index cb1a88d25..cecdff545 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -39,7 +39,8 @@ func PathParam(name string) *Parameter { // BodyParam creates a body parameter func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, + SimpleSchema: SimpleSchema{Type: "object"}} } // FormDataParam creates a body parameter @@ -49,12 +50,15 @@ func FormDataParam(name string) *Parameter { // FileParam creates a body parameter func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} } // SimpleArrayParam creates a param for a simple array (string, int, date etc) func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} } // ParamRef creates a parameter that's a json reference @@ -65,25 +69,43 @@ func ParamRef(uri string) *Parameter { } // ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` In string `json:"in,omitempty"` Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` // when in == "body" - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. // * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. // * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. 
The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { @@ -99,7 +121,7 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } @@ -148,7 +170,7 @@ func (p *Parameter) Typed(tpe, format string) *Parameter { // CollectionOf a fluent builder method for an array parameter func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = "array" + p.Type = jsonArray p.Items = items p.CollectionFormat = format return p @@ -270,10 +292,7 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.ParamProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.ParamProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go index a8ae63ece..68fc8e901 100644 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -50,7 +50,7 @@ func (p PathItem) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) @@ -65,10 +65,7 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.PathItemProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.PathItemProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 1405bfd8e..08ff869b2 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "net/http" "os" @@ -148,6 +150,28 @@ func (r *Ref) UnmarshalJSON(d []byte) error { return r.fromMap(v) } +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + func (r *Ref) fromMap(v map[string]interface{}) error { if v == nil { return nil diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index 586db0d78..27729c1d9 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -58,10 +58,7 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &r.VendorExtensible) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index b9481e29b..ce30d26e3 100644 --- 
a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -89,7 +89,8 @@ func DateTimeProperty() *Schema { // MapProperty creates a map property func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} } // RefProperty creates a ref property @@ -155,54 +156,6 @@ func (r *SchemaURL) fromMap(v map[string]interface{}) error { return nil } -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } - // SchemaProps describes a JSON schema (draft 4) type SchemaProps struct { ID string `json:"id,omitempty"` @@ -351,7 +304,7 @@ func (s *Schema) AddType(tpe, format string) *Schema { // CollectionOf a fluent builder method for an array parameter func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{"array"} + s.Type = []string{jsonArray} s.Items = &SchemaOrArray{Schema: &items} return s } diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 000000000..c34a96fa0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
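The schema.go hunks above are cosmetic: the fluent builder helpers are re-wrapped and the literal "array" is replaced by the shared jsonArray constant, with no behavior change. For orientation, here is a minimal sketch of how these builders compose; it assumes only the exported go-openapi/spec helpers visible in the hunks (MapProperty, ArrayProperty, StringProperty):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An object schema whose values are arrays of strings:
	// MapProperty produces type "object" with additionalProperties, and
	// ArrayProperty produces type "array" (the jsonArray constant above).
	schema := spec.MapProperty(spec.ArrayProperty(spec.StringProperty()))

	out, err := json.MarshalIndent(schema, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

Marshalling the result shows the nested `"type": "object"` / `"type": "array"` structure these constants feed into.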
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// PathLoader is the function used when loading remote refs
+var PathLoader func(string) (json.RawMessage, error)
+
+func init() {
+	PathLoader = func(path string) (json.RawMessage, error) {
+		data, err := swag.LoadFromFileOrHTTP(path)
+		if err != nil {
+			return nil, err
+		}
+		return json.RawMessage(data), nil
+	}
+}
+
+// resolverContext allows sharing a context during spec processing.
+// At the moment, it just holds the index of circular references found.
+type resolverContext struct {
+	// circulars holds all visited circular references, which allows shortcuts.
+	// NOTE: this is not just a performance improvement: it is required to figure out
+	// circular references which participate in several cycles.
+	// This structure is privately instantiated and need not be locked against
+	// concurrent access, unless we choose to implement a parallel spec walking.
+	circulars map[string]bool
+	basePath  string
+}
+
+func newResolverContext(originalBasePath string) *resolverContext {
+	return &resolverContext{
+		circulars: make(map[string]bool),
+		basePath:  originalBasePath, // keep the root base path in context
+	}
+}
+
+type schemaLoader struct {
+	root    interface{}
+	options *ExpandOptions
+	cache   ResolutionCache
+	context *resolverContext
+	loadDoc func(string) (json.RawMessage, error)
+}
+
+func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) {
+	if ref.IsRoot() || ref.HasFragmentOnly {
+		return r, nil
+	}
+
+	baseRef, _ := NewRef(basePath)
+	currentRef := normalizeFileRef(&ref, basePath)
+	if strings.HasPrefix(currentRef.String(), baseRef.String()) {
+		return r, nil
+	}
+
+	// Set a new root to resolve against
+	rootURL := currentRef.GetURL()
+	rootURL.Fragment = ""
+	root, _ := r.cache.Get(rootURL.String())
+
+	// shallow copy of resolver options to set a new RelativeBase when
+	// traversing multiple documents
+	newOptions := r.options
+	newOptions.RelativeBase = rootURL.String()
+	debugLog("setting new root: %s", newOptions.RelativeBase)
+	resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context)
+	if err != nil {
+		return nil, err
+	}
+
+	return resolver, nil
+}
+
+func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string {
+	if transitive != r {
+		debugLog("got a new resolver")
+		if transitive.options != nil && transitive.options.RelativeBase != "" {
+			basePath, _ = absPath(transitive.options.RelativeBase)
+			debugLog("new basePath = %s", basePath)
+		}
+	}
+	return basePath
+}
+
+func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error {
+	tgt := reflect.ValueOf(target)
+	if tgt.Kind() != reflect.Ptr {
+		return fmt.Errorf("resolve ref: target needs to be a pointer")
+	}
+
+	refURL := ref.GetURL()
+	if refURL == nil {
+		return nil
+	}
+
+	var res interface{}
+	var data interface{}
+	var err error
+	// Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means
+	// it is pointing somewhere in the root. 
+	root := r.root
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
+		if baseRef, erb := NewRef(basePath); erb == nil {
+			root, _, _, _ = r.load(baseRef.GetURL())
+		}
+	}
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
+		data = root
+	} else {
+		baseRef := normalizeFileRef(ref, basePath)
+		debugLog("current ref is: %s", ref.String())
+		debugLog("current ref normalized file: %s", baseRef.String())
+		data, _, _, err = r.load(baseRef.GetURL())
+		if err != nil {
+			return err
+		}
+	}
+
+	res = data
+	if ref.String() != "" {
+		res, _, err = ref.GetPointer().Get(data)
+		if err != nil {
+			return err
+		}
+	}
+	return swag.DynamicJSONToStruct(res, target)
+}
+
+func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
+	debugLog("loading schema from url: %s", refURL)
+	toFetch := *refURL
+	toFetch.Fragment = ""
+
+	normalized := normalizeAbsPath(toFetch.String())
+
+	data, fromCache := r.cache.Get(normalized)
+	if !fromCache {
+		b, err := r.loadDoc(normalized)
+		if err != nil {
+			return nil, url.URL{}, false, err
+		}
+
+		if err := json.Unmarshal(b, &data); err != nil {
+			return nil, url.URL{}, false, err
+		}
+		r.cache.Set(normalized, data)
+	}
+
+	return data, toFetch, fromCache, nil
+}
+
+// isCircular detects cycles in sequences of $ref.
+// It relies on a private context (which need not be locked).
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) {
+	normalizedRef := normalizePaths(ref.String(), basePath)
+	if _, ok := r.context.circulars[normalizedRef]; ok {
+		// this circular $ref has already been detected in another explored cycle
+		foundCycle = true
+		return
+	}
+	foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef)
+	if foundCycle {
+		r.context.circulars[normalizedRef] = true
+	}
+	return
+}
+
+// Resolve resolves a reference against basePath and stores the result in target.
+// Resolve is not in charge of following references: it only resolves a ref by following its URL.
+// If the schema the ref points to holds further refs, Resolve does not resolve them.
+// If basePath is an empty string, the ref is resolved against the root schema stored in the schemaLoader struct.
+func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error {
+	return r.resolveRef(ref, target, basePath)
+}
+
+func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error {
+	var ref *Ref
+	switch refable := input.(type) {
+	case *Schema:
+		ref = &refable.Ref
+	case *Parameter:
+		ref = &refable.Ref
+	case *Response:
+		ref = &refable.Ref
+	case *PathItem:
+		ref = &refable.Ref
+	default:
+		return fmt.Errorf("deref: unsupported type %T", input)
+	}
+
+	curRef := ref.String()
+	if curRef != "" {
+		normalizedRef := normalizeFileRef(ref, basePath)
+		normalizedBasePath := normalizedRef.RemoteURI()
+
+		if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + // NOTE(fredbi): removed basePath check => needs more testing + if ref.String() != "" && ref.String() != curRef { + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) + } + } + + return nil +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { + + if cache == nil { + cache = resCache + } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + loadDoc: func(path string) (json.RawMessage, error) { + debugLog("fetching document at %q", path) + return PathLoader(path) + }, + }, nil +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go index 9f1b454ea..fe353842a 100644 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -136,8 +136,5 @@ func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { return err } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &s.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 4586a21c8..454617e58 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -24,7 +24,8 @@ import ( ) // Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. 
// // For more information: http://goo.gl/8us55a#swagger-object- type Swagger struct { @@ -68,16 +69,21 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { } // SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required type SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Schemes []string `json:"schemes,omitempty"` Swagger string `json:"swagger,omitempty"` Info *Info `json:"info,omitempty"` Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` // must start with a leading "/" - Paths *Paths `json:"paths"` // required + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` Definitions Definitions `json:"definitions,omitempty"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` @@ -244,9 +250,9 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { if single == nil { return nil } - switch single.(type) { + switch v := single.(type) { case string: - *s = StringOrArray([]string{single.(string)}) + *s = StringOrArray([]string{v}) return nil default: return fmt.Errorf("only string or array is allowed, not %T", single) diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go index 25256c4be..faa3d3de1 100644 --- a/vendor/github.com/go-openapi/spec/tag.go +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -30,10 +30,11 @@ type TagProps struct { // NewTag creates a new tag func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{description, name, externalDocs}} + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} } -// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). // It is not mandatory to have a Tag Object per tag used there. // // For more information: http://goo.gl/8us55a#tagObject diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go new file mode 100644 index 000000000..aa12b56f6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/unused.go @@ -0,0 +1,174 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +/* + +import ( + "net/url" + "os" + "path" + "path/filepath" + + "github.com/go-openapi/jsonpointer" +) + + // Some currently unused functions and definitions that + // used to be part of the expander. 
+ + // Moved here for the record and possible future reuse + +var ( + idPtr, _ = jsonpointer.New("/id") + refPtr, _ = jsonpointer.New("/$ref") +) + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } + nw, err := ret.Inherits(rf) + if err != nil { + break + } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + + ret = nw + } + + } + + return ret +} + +// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID +func basePathFromSchemaID(oldBasePath, id string) string { + u, err := url.Parse(oldBasePath) + if err != nil { + panic(err) + } + uid, err := url.Parse(id) + if err != nil { + panic(err) + } + + if path.IsAbs(uid.Path) { + return id + } + u.Path = path.Join(path.Dir(u.Path), uid.Path) + return u.String() +} +*/ + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 7d5534b24..0d235876d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -59,12 +59,6 @@ type Config struct { // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. DefaultResponse *spec.Response - // ResponseDefinitions will be added to "responses" under the top-level swagger object. 
This is an object - // that holds responses definitions that can be used across operations. This property does not define - // global responses for all operations. For more info please refer: - // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields - ResponseDefinitions map[string]spec.Response - // CommonResponses will be added as a response to all operation specs. This is a good place to add common // responses such as authorization failed. CommonResponses map[int]spec.Response diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go index f73285887..9270d2632 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go @@ -17,114 +17,16 @@ limitations under the License. package generators import ( - "bytes" "fmt" "io" - "io/ioutil" - "os" - "sort" "k8s.io/kube-openapi/pkg/generators/rules" - "k8s.io/gengo/generator" + "github.com/golang/glog" "k8s.io/gengo/types" - "k8s.io/klog" ) -const apiViolationFileType = "api-violation" - -type apiViolationFile struct { - // Since our file actually is unrelated to the package structure, use a - // path that hasn't been mangled by the framework. - unmangledPath string -} - -func (a apiViolationFile) AssembleFile(f *generator.File, path string) error { - path = a.unmangledPath - klog.V(2).Infof("Assembling file %q", path) - if path == "-" { - _, err := io.Copy(os.Stdout, &f.Body) - return err - } - - output, err := os.Create(path) - if err != nil { - return err - } - defer output.Close() - _, err = io.Copy(output, &f.Body) - return err -} - -func (a apiViolationFile) VerifyFile(f *generator.File, path string) error { - if path == "-" { - // Nothing to verify against. - return nil - } - path = a.unmangledPath - - formatted := f.Body.Bytes() - existing, err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("unable to read file %q for comparison: %v", path, err) - } - if bytes.Compare(formatted, existing) == 0 { - return nil - } - - // Be nice and find the first place where they differ - // (Copied from gengo's default file type) - i := 0 - for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { - i++ - } - eDiff, fDiff := existing[i:], formatted[i:] - if len(eDiff) > 100 { - eDiff = eDiff[:100] - } - if len(fDiff) > 100 { - fDiff = fDiff[:100] - } - return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", path, string(eDiff), string(fDiff)) -} - -func newAPIViolationGen() *apiViolationGen { - return &apiViolationGen{ - linter: newAPILinter(), - } -} - -type apiViolationGen struct { - generator.DefaultGen - - linter *apiLinter -} - -func (v *apiViolationGen) FileType() string { return apiViolationFileType } -func (v *apiViolationGen) Filename() string { - return "this file is ignored by the file assembler" -} - -func (v *apiViolationGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - klog.V(5).Infof("validating API rules for type %v", t) - if err := v.linter.validate(t); err != nil { - return err - } - return nil -} - -// Finalize prints the API rule violations to report file (if specified from -// arguments) or stdout (default) -func (v *apiViolationGen) Finalize(c *generator.Context, w io.Writer) error { - // NOTE: we don't return error here because we assume that the report file will - // get evaluated afterwards to determine if error should be raised. 
For example,
-	// you can have make rules that compare the report file with existing known
-	// violations (whitelist) and determine no error if no change is detected.
-	v.linter.report(w)
-	return nil
-}
-
-// apiLinter is the framework hosting multiple API rules and recording API rule
+// apiLinter is the framework hosting multiple API rules and recording API rule
 // violations
 type apiLinter struct {
 	// API rules that implement APIRule interface and output API rule violations
@@ -138,7 +40,6 @@ func newAPILinter() *apiLinter {
 	return &apiLinter{
 		rules: []APIRule{
 			&rules.NamesMatch{},
-			&rules.OmitEmptyMatchCase{},
 		},
 	}
 }
@@ -156,25 +57,6 @@ type apiViolation struct {
 	field string
 }

-// apiViolations implements sort.Interface for []apiViolation based on the fields: rule,
-// packageName, typeName and field.
-type apiViolations []apiViolation
-
-func (a apiViolations) Len() int      { return len(a) }
-func (a apiViolations) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a apiViolations) Less(i, j int) bool {
-	if a[i].rule != a[j].rule {
-		return a[i].rule < a[j].rule
-	}
-	if a[i].packageName != a[j].packageName {
-		return a[i].packageName < a[j].packageName
-	}
-	if a[i].typeName != a[j].typeName {
-		return a[i].typeName < a[j].typeName
-	}
-	return a[i].field < a[j].field
-}
-
 // APIRule is the interface for validating API rule on Go types
 type APIRule interface {
 	// Validate evaluates API rule on type t and returns a list of field names in
@@ -189,7 +71,7 @@
 // validate runs all API rules on type t and records any API rule violation
 func (l *apiLinter) validate(t *types.Type) error {
 	for _, r := range l.rules {
-		klog.V(5).Infof("validating API rule %v for type %v", r.Name(), t)
+		glog.V(5).Infof("validating API rule %v for type %v", r.Name(), t)
 		fields, err := r.Validate(t)
 		if err != nil {
 			return err
@@ -208,7 +90,6 @@
 // report prints any API rule violation to writer w and returns error if violation exists
 func (l *apiLinter) report(w io.Writer) error {
-	sort.Sort(apiViolations(l.violations))
 	for _, v := range l.violations {
 		fmt.Fprintf(w, "API rule violation: %s,%s,%s,%s\n", v.rule, v.packageName, v.typeName, v.field)
 	}
diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/config.go b/vendor/k8s.io/kube-openapi/pkg/generators/config.go
deleted file mode 100644
index 33cd9eb5a..000000000
--- a/vendor/k8s.io/kube-openapi/pkg/generators/config.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package generators - -import ( - "fmt" - "path/filepath" - - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - "k8s.io/klog" - - generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" -) - -type identityNamer struct{} - -func (_ identityNamer) Name(t *types.Type) string { - return t.Name.String() -} - -var _ namer.Namer = identityNamer{} - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "raw": namer.NewRawNamer("", nil), - "sorting_namer": identityNamer{}, - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. -func DefaultNameSystem() string { - return "sorting_namer" -} - -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() - if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) - } - header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) - header = append(header, []byte( - ` -// This file was autogenerated by openapi-gen. Do not edit it manually! - -`)...) - - reportPath := "-" - if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { - reportPath = customArgs.ReportFilename - } - context.FileTypes[apiViolationFileType] = apiViolationFile{ - unmangledPath: reportPath, - } - - return generator.Packages{ - &generator.DefaultPackage{ - PackageName: filepath.Base(arguments.OutputPackagePath), - PackagePath: arguments.OutputPackagePath, - HeaderText: header, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{ - newOpenAPIGen( - arguments.OutputFileBaseName, - arguments.OutputPackagePath, - ), - newAPIViolationGen(), - } - }, - FilterFunc: apiTypeFilterFunc, - }, - } -} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go index 14eab18f6..befe38db2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -36,20 +36,20 @@ type extensionAttributes struct { // Extension tag to openapi extension attributes var tagToExtension = map[string]extensionAttributes{ - "patchMergeKey": { + "patchMergeKey": extensionAttributes{ xName: "x-kubernetes-patch-merge-key", kind: types.Slice, }, - "patchStrategy": { + "patchStrategy": extensionAttributes{ xName: "x-kubernetes-patch-strategy", kind: types.Slice, allowedValues: sets.NewString("merge", "retainKeys"), }, - "listMapKey": { + "listMapKey": extensionAttributes{ xName: "x-kubernetes-list-map-keys", kind: types.Slice, }, - "listType": { + "listType": extensionAttributes{ xName: "x-kubernetes-list-type", kind: types.Slice, allowedValues: sets.NewString("atomic", "set", "map"), diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 11d42b6d3..d6c6275a7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -20,17 +20,20 @@ import ( "bytes" "fmt" "io" + "os" "path/filepath" "reflect" "sort" "strings" + "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" openapi "k8s.io/kube-openapi/pkg/common" - "k8s.io/klog" 
+ "github.com/golang/glog" ) // This is the comment tag that carries parameters for open API generation. @@ -85,19 +88,69 @@ func hasOptionalTag(m *types.Member) bool { return hasOptionalCommentTag || hasOptionalJsonTag } -func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool { - // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen - if strings.HasPrefix(t.Name.Name, "codecSelfer") { - return false +type identityNamer struct{} + +func (_ identityNamer) Name(t *types.Type) string { + return t.Name.String() +} + +var _ namer.Namer = identityNamer{} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + "sorting_namer": identityNamer{}, } - pkg := c.Universe.Package(t.Name.Package) - if hasOpenAPITagValue(pkg.Comments, tagValueTrue) { - return !hasOpenAPITagValue(t.CommentLines, tagValueFalse) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "sorting_namer" +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + glog.Fatalf("Failed loading boilerplate: %v", err) } - if hasOpenAPITagValue(t.CommentLines, tagValueTrue) { - return true + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + header = append(header, []byte( + ` +// This file was autogenerated by openapi-gen. Do not edit it manually! + +`)...) + + reportFilename := "-" + if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { + reportFilename = customArgs.ReportFilename + } + + return generator.Packages{ + &generator.DefaultPackage{ + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, arguments.OutputPackagePath, context, newAPILinter(), reportFilename)} + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen + if strings.HasPrefix(t.Name.Name, "codecSelfer") { + return false + } + pkg := context.Universe.Package(t.Name.Package) + if hasOpenAPITagValue(pkg.Comments, tagValueTrue) { + return !hasOpenAPITagValue(t.CommentLines, tagValueFalse) + } + if hasOpenAPITagValue(t.CommentLines, tagValueTrue) { + return true + } + return false + }, + }, } - return false } const ( @@ -109,17 +162,24 @@ const ( type openAPIGen struct { generator.DefaultGen // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. 
- targetPackage string - imports namer.ImportTracker + targetPackage string + imports namer.ImportTracker + types []*types.Type + context *generator.Context + linter *apiLinter + reportFilename string } -func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generator { +func NewOpenAPIGen(sanitizedName string, targetPackage string, context *generator.Context, linter *apiLinter, reportFilename string) generator.Generator { return &openAPIGen{ DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, }, - imports: generator.NewImportTracker(), - targetPackage: targetPackage, + imports: generator.NewImportTracker(), + targetPackage: targetPackage, + context: context, + linter: linter, + reportFilename: reportFilename, } } @@ -138,6 +198,15 @@ func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { } } +func (g *openAPIGen) Filter(c *generator.Context, t *types.Type) bool { + // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen + if strings.HasPrefix(t.Name.Name, "codecSelfer") { + return false + } + g.types = append(g.types, t) + return true +} + func (g *openAPIGen) isOtherPackage(pkg string) bool { if pkg == g.targetPackage { return false @@ -170,7 +239,7 @@ func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error { sw.Do("func GetOpenAPIDefinitions(ref $.ReferenceCallback|raw$) map[string]$.OpenAPIDefinition|raw$ {\n", argsFromType(nil)) sw.Do("return map[string]$.OpenAPIDefinition|raw${\n", argsFromType(nil)) - for _, t := range c.Order { + for _, t := range g.types { err := newOpenAPITypeWriter(sw).generateCall(t) if err != nil { return err @@ -184,7 +253,11 @@ func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error { } func (g *openAPIGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - klog.V(5).Infof("generating for type %v", t) + glog.V(5).Infof("validating API rules for type %v", t) + if err := g.linter.validate(t); err != nil { + return err + } + glog.V(5).Infof("generating for type %v", t) sw := generator.NewSnippetWriter(w, c, "$", "$") err := newOpenAPITypeWriter(sw).generate(t) if err != nil { @@ -289,7 +362,7 @@ func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([] required = append(required, name) } if err = g.generateProperty(&m, t); err != nil { - klog.Errorf("Error when generating: %v, %v\n", name, m) + glog.Errorf("Error when generating: %v, %v\n", name, m) return required, err } } @@ -376,7 +449,7 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { // Initially, we will only log struct extension errors. if len(errors) > 0 { for _, e := range errors { - klog.V(2).Infof("[%s]: %s\n", t.String(), e) + glog.V(2).Infof("[%s]: %s\n", t.String(), e) } } // TODO(seans3): Validate struct extensions here. 
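In the older kube-openapi revision restored above, linting is folded into the generator itself: GenerateType first runs the embedded apiLinter over each type and only then emits code, with violations reported through the generator's Finalize hook (next hunk). New checks plug in by implementing the APIRule interface from api_linter.go (Name and Validate) and appending the rule to the slice in newAPILinter(). A hypothetical rule, sketched against gengo's types package; the rule name and check here are illustrative, not part of the upstream rule set:

```go
package rules

import (
	"reflect"
	"strings"
	"unicode"

	"k8s.io/gengo/types"
)

// JSONTagLowercase is a hypothetical APIRule shown only to illustrate the
// plug-in point: it flags struct fields whose json tag name starts with an
// upper-case ASCII letter.
type JSONTagLowercase struct{}

// Name is the rule name printed in the violation report.
func (r *JSONTagLowercase) Name() string { return "json_tag_lowercase" }

// Validate returns the Go names of fields in t that violate the rule.
func (r *JSONTagLowercase) Validate(t *types.Type) ([]string, error) {
	var fields []string
	if t.Kind != types.Struct {
		return fields, nil
	}
	for _, m := range t.Members {
		// m.Tags holds the raw struct tag, e.g. `json:"name,omitempty"`.
		name := strings.Split(reflect.StructTag(m.Tags).Get("json"), ",")[0]
		if name != "" && name != "-" && unicode.IsUpper(rune(name[0])) {
			fields = append(fields, m.Name)
		}
	}
	return fields, nil
}
```

Registering it would mean adding `&rules.JSONTagLowercase{}` next to `&rules.NamesMatch{}` in newAPILinter().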
@@ -392,7 +465,7 @@ func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *typ if len(errors) > 0 { errorPrefix := fmt.Sprintf("[%s] %s:", parent.String(), m.String()) for _, e := range errors { - klog.V(2).Infof("%s %s\n", errorPrefix, e) + glog.V(2).Infof("%s %s\n", errorPrefix, e) } } g.emitExtensions(extensions) @@ -605,3 +678,27 @@ func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error { g.Do("},\n},\n},\n", nil) return nil } + +// Finalize prints the API rule violations to report file (if specified from arguments) or stdout (default) +func (g *openAPIGen) Finalize(c *generator.Context, w io.Writer) error { + // If report file isn't specified, return error to force user to choose either stdout ("-") or a file name + if len(g.reportFilename) == 0 { + return fmt.Errorf("empty report file name: please provide a valid file name or use the default \"-\" (stdout)") + } + // If stdout is specified, print violations and return error + if g.reportFilename == "-" { + return g.linter.report(os.Stdout) + } + // Otherwise, print violations to report file and return nil + f, err := os.Create(g.reportFilename) + if err != nil { + return err + } + defer f.Close() + g.linter.report(f) + // NOTE: we don't return error here because we assume that the report file will + // get evaluated afterwards to determine if error should be raised. For example, + // you can have make rules that compare the report file with existing known + // violations (whitelist) and determine no error if no change is detected. + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go deleted file mode 100644 index dd37ad8a5..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rules - -import ( - "reflect" - "strings" - - "k8s.io/gengo/types" -) - -// OmitEmptyMatchCase implements APIRule interface. -// "omitempty" must appear verbatim (no case variants). 
-type OmitEmptyMatchCase struct{} - -func (n *OmitEmptyMatchCase) Name() string { - return "omitempty_match_case" -} - -func (n *OmitEmptyMatchCase) Validate(t *types.Type) ([]string, error) { - fields := make([]string, 0) - - // Only validate struct type and ignore the rest - switch t.Kind { - case types.Struct: - for _, m := range t.Members { - goName := m.Name - jsonTag, ok := reflect.StructTag(m.Tags).Lookup("json") - if !ok { - continue - } - - parts := strings.Split(jsonTag, ",") - if len(parts) < 2 { - // no tags other than name - continue - } - if parts[0] == "-" { - // not serialized - continue - } - for _, part := range parts[1:] { - if strings.EqualFold(part, "omitempty") && part != "omitempty" { - fields = append(fields, goName) - } - } - } - } - return fields, nil -} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 890a39399..a57dcd363 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -196,24 +196,20 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error } fields := map[string]Schema{} - fieldOrder := []string{} for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { var err error - name := namedSchema.GetName() - path := path.FieldPath(name) - fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path) + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) if err != nil { return nil, err } - fieldOrder = append(fieldOrder, name) } return &Kind{ BaseSchema: d.parseBaseSchema(s, path), RequiredFields: s.GetRequired(), Fields: fields, - FieldOrder: fieldOrder, }, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 46643aa50..f26b5ef88 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -173,8 +173,6 @@ type Kind struct { RequiredFields []string // Maps field names to types. Fields map[string]Schema - // FieldOrder reports the canonical order for the fields. - FieldOrder []string } var _ Schema = &Kind{}
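The downgrade also drops the OmitEmptyMatchCase rule deleted above, which flagged case variants of the `omitempty` json tag option. The mistake it guarded against is easy to reproduce, because encoding/json matches tag options case-sensitively and silently ignores unknown ones. A small self-contained demonstration (the widget type is made up for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type widget struct {
	// "omitEmpty" is not a recognized option: encoding/json ignores it,
	// so an empty Name is still serialized.
	Name string `json:"name,omitEmpty"`
	// "omitempty" (exact case) drops the empty value as intended.
	Kind string `json:"kind,omitempty"`
}

func main() {
	out, err := json.Marshal(widget{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints: {"name":""}
}
```

Without the rule, such a typo passes the generator silently, which is exactly the class of API drift the linter was added to catch.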