From b8fb36d20e0987383201a055a172f372642d2cf1 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 24 Apr 2020 10:38:27 -0400 Subject: [PATCH 1/3] Update cortex to latest. This brings fixes for traceID in the logger and swift. I've also added a bit of configuration for swift. Fixes https://github.com/grafana/loki/issues/905 Signed-off-by: Cyril Tovena --- docs/configuration/README.md | 62 +- go.mod | 2 +- go.sum | 4 + .../cortexproject/cortex/pkg/api/api.go | 19 +- .../chunk/openstack/swift_object_client.go | 178 ++ .../cortex/pkg/chunk/storage/factory.go | 10 + .../cortexproject/cortex/pkg/cortex/cortex.go | 3 + .../cortex/pkg/distributor/distributor.go | 37 + .../cortex/pkg/ingester/client/compat.go | 26 + .../cortex/pkg/ingester/client/cortex.pb.go | 645 ++++- .../cortex/pkg/ingester/client/cortex.proto | 8 + .../cortex/pkg/ingester/ingester.go | 179 +- .../cortex/pkg/ingester/ingester_v2.go | 24 +- .../cortex/pkg/ingester/limiter.go | 87 +- .../cortex/pkg/ingester/metrics.go | 53 +- .../pkg/ingester/user_metrics_metadata.go | 106 + .../cortex/pkg/ingester/user_state.go | 18 +- .../cortexproject/cortex/pkg/ingester/wal.go | 320 ++- .../pkg/querier/distributor_queryable.go | 2 + .../cortex/pkg/querier/frontend/frontend.go | 20 +- .../cortexproject/cortex/pkg/ruler/ruler.go | 8 + .../cortexproject/cortex/pkg/ruler/storage.go | 15 + .../cortexproject/cortex/pkg/util/log.go | 7 +- .../cortex/pkg/util/spanlogger/spanlogger.go | 4 +- .../cortex/pkg/util/validation/limits.go | 31 + .../cortex/pkg/util/validation/validate.go | 2 + .../objectstorage/v1/accounts/doc.go | 29 + .../objectstorage/v1/accounts/requests.go | 100 + .../objectstorage/v1/accounts/results.go | 180 ++ .../objectstorage/v1/accounts/urls.go | 11 + .../objectstorage/v1/containers/doc.go | 95 + .../objectstorage/v1/containers/requests.go | 231 ++ .../objectstorage/v1/containers/results.go | 344 +++ .../objectstorage/v1/containers/urls.go | 23 + .../openstack/objectstorage/v1/objects/doc.go | 106 + .../objectstorage/v1/objects/errors.go | 13 + .../objectstorage/v1/objects/requests.go | 499 ++++ .../objectstorage/v1/objects/results.go | 580 +++++ .../objectstorage/v1/objects/urls.go | 33 + vendor/github.com/ncw/swift/.gitignore | 4 + vendor/github.com/ncw/swift/.travis.yml | 33 + vendor/github.com/ncw/swift/COPYING | 20 + vendor/github.com/ncw/swift/README.md | 161 ++ vendor/github.com/ncw/swift/auth.go | 335 +++ vendor/github.com/ncw/swift/auth_v3.go | 300 +++ .../github.com/ncw/swift/compatibility_1_0.go | 28 + .../github.com/ncw/swift/compatibility_1_1.go | 24 + .../github.com/ncw/swift/compatibility_1_6.go | 23 + .../ncw/swift/compatibility_not_1_6.go | 13 + vendor/github.com/ncw/swift/dlo.go | 149 ++ vendor/github.com/ncw/swift/doc.go | 19 + vendor/github.com/ncw/swift/go.mod | 1 + vendor/github.com/ncw/swift/largeobjects.go | 448 ++++ vendor/github.com/ncw/swift/meta.go | 174 ++ vendor/github.com/ncw/swift/notes.txt | 55 + vendor/github.com/ncw/swift/slo.go | 171 ++ vendor/github.com/ncw/swift/swift.go | 2270 +++++++++++++++++ vendor/github.com/ncw/swift/timeout_reader.go | 59 + .../github.com/ncw/swift/travis_realserver.sh | 22 + .../github.com/ncw/swift/watchdog_reader.go | 55 + .../thanos/pkg/objstore/swift/swift.go | 320 +++ vendor/modules.txt | 9 +- 62 files changed, 8574 insertions(+), 233 deletions(-) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go create mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go
 create mode 100644 vendor/github.com/ncw/swift/.gitignore
 create mode 100644 vendor/github.com/ncw/swift/.travis.yml
 create mode 100644 vendor/github.com/ncw/swift/COPYING
 create mode 100644 vendor/github.com/ncw/swift/README.md
 create mode 100644 vendor/github.com/ncw/swift/auth.go
 create mode 100644 vendor/github.com/ncw/swift/auth_v3.go
 create mode 100644 vendor/github.com/ncw/swift/compatibility_1_0.go
 create mode 100644 vendor/github.com/ncw/swift/compatibility_1_1.go
 create mode 100644 vendor/github.com/ncw/swift/compatibility_1_6.go
 create mode 100644 vendor/github.com/ncw/swift/compatibility_not_1_6.go
 create mode 100644 vendor/github.com/ncw/swift/dlo.go
 create mode 100644 vendor/github.com/ncw/swift/doc.go
 create mode 100644 vendor/github.com/ncw/swift/go.mod
 create mode 100644 vendor/github.com/ncw/swift/largeobjects.go
 create mode 100644 vendor/github.com/ncw/swift/meta.go
 create mode 100644 vendor/github.com/ncw/swift/notes.txt
 create mode 100644 vendor/github.com/ncw/swift/slo.go
 create mode 100644 vendor/github.com/ncw/swift/swift.go
 create mode 100644 vendor/github.com/ncw/swift/timeout_reader.go
 create mode 100644 vendor/github.com/ncw/swift/travis_realserver.sh
 create mode 100644 vendor/github.com/ncw/swift/watchdog_reader.go
 create mode 100644 vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index ae333482e9c8..d529536e2840 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -539,6 +539,66 @@ cassandra:
   # Initial connection timeout during initial dial to server.
   [connect_timeout: <duration> | default = 600ms]
 
+swift:
+  # Openstack authentication URL.
+  # CLI flag: -ruler.storage.swift.auth-url
+  [auth_url: <string> | default = ""]
+
+  # Openstack username for the api.
+  # CLI flag: -ruler.storage.swift.username
+  [username: <string> | default = ""]
+
+  # Openstack user's domain name.
+  # CLI flag: -ruler.storage.swift.user-domain-name
+  [user_domain_name: <string> | default = ""]
+
+  # Openstack user's domain id.
+  # CLI flag: -ruler.storage.swift.user-domain-id
+  [user_domain_id: <string> | default = ""]
+
+  # Openstack userid for the api.
+  # CLI flag: -ruler.storage.swift.user-id
+  [user_id: <string> | default = ""]
+
+  # Openstack api key.
+  # CLI flag: -ruler.storage.swift.password
+  [password: <string> | default = ""]
+
+  # Openstack user's domain id.
+  # CLI flag: -ruler.storage.swift.domain-id
+  [domain_id: <string> | default = ""]
+
+  # Openstack user's domain name.
+  # CLI flag: -ruler.storage.swift.domain-name
+  [domain_name: <string> | default = ""]
+
+  # Openstack project id (v2,v3 auth only).
+  # CLI flag: -ruler.storage.swift.project-id
+  [project_id: <string> | default = ""]
+
+  # Openstack project name (v2,v3 auth only).
+  # CLI flag: -ruler.storage.swift.project-name
+  [project_name: <string> | default = ""]
+
+  # Id of the project's domain (v3 auth only), only needed if it differs
+  # from the user domain.
+  # CLI flag: -ruler.storage.swift.project-domain-id
+  [project_domain_id: <string> | default = ""]
+
+  # Name of the project's domain (v3 auth only), only needed if it differs
+  # from the user domain.
+  # CLI flag: -ruler.storage.swift.project-domain-name
+  [project_domain_name: <string> | default = ""]
+
+  # Openstack Region to use eg LON, ORD - default is to use the first region
+  # (v2,v3 auth only)
+  # CLI flag: -ruler.storage.swift.region-name
+  [region_name: <string> | default = ""]
+
+  # Name of the Swift container to put chunks in.
+  # CLI flag: -ruler.storage.swift.container-name
+  [container_name: <string> | default = "cortex"]
+
 # Configures storing index in BoltDB. Required fields only
 # required when boltdb is present in config.
 boltdb:
@@ -697,7 +757,7 @@ for from specific time periods.
 store:
 
 # Which store to use for the chunks. Either aws, aws-dynamo, azure, gcp,
-# bigtable, gcs, cassandra, or filesystem. If omitted, defaults to the same
+# bigtable, gcs, cassandra, swift, or filesystem. If omitted, defaults to the same
 # value as store.
 [object_store: <string>]
diff --git a/go.mod b/go.mod
index 832c749d4230..8503b101b2c8 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/containerd/containerd v1.3.2 // indirect
 	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
-	github.com/cortexproject/cortex v1.0.1-0.20200423101820-36496a074bc4
+	github.com/cortexproject/cortex v1.0.1-0.20200424135841-64fb9ad94a38
 	github.com/davecgh/go-spew v1.1.1
 	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/docker v0.7.3-0.20190817195342-4760db040282
diff --git a/go.sum b/go.sum
index 8aa8cf368421..9204ae3c20c1 100644
--- a/go.sum
+++ b/go.sum
@@ -172,6 +172,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
 github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134=
 github.com/cortexproject/cortex v1.0.1-0.20200423101820-36496a074bc4 h1:SNBpM6lX8ZjDsSrQWbxP1FRO8KXirnRwFvtcLA8+DCc=
 github.com/cortexproject/cortex v1.0.1-0.20200423101820-36496a074bc4/go.mod h1:S2BogfHdb0YCo5Zly3vOEsqzsE7YXdumHBMRJkgDZm4=
+github.com/cortexproject/cortex v1.0.1-0.20200424135841-64fb9ad94a38 h1:zvaE5fX7A1ZcrAuXYxhXoDSVqI6Q1byZEqYAK+9KhAM=
+github.com/cortexproject/cortex v1.0.1-0.20200424135841-64fb9ad94a38/go.mod h1:CTLwVWnV5PfcLZqzZVe+0Sa69pRrhyT2d9g0ml5S9aQ=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
 github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
@@ -631,6 +633,8 @@
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
 github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
+github.com/ncw/swift v1.0.50 h1:E01b5bVIssNhx2KnzAjMWEXkKrb8ytTqCDWY7lqmWjA=
+github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
 github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go
index 7aeb6e16f338..a429262a0764 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go
@@ -7,6 +7,8 @@ import (
 	"regexp"
 	"strings"
+	"github.com/opentracing-contrib/go-stdlib/nethttp"
+	"github.com/opentracing/opentracing-go"
+	"github.com/prometheus/client_golang/prometheus"
+
 	"github.com/go-kit/kit/log"
@@ -274,8 +276,8 @@ func (a *API) RegisterQuerier(queryable storage.Queryable, engine *promql.Engine
 	a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/chunks", querier.ChunksHandler(queryable), true)
 
 	// these routes are either registered the default server OR to an internal mux. The internal mux is
-	// for use in a single binary mode when both the query frontend and the querier would attempt to claim these routes
-	// TODO: Add support to expose querier paths with a configurable prefix in single binary mode.
+	// for use in a single binary mode when both the query frontend and the querier would attempt to claim these routes
+	// TODO: Add support to expose querier paths with a configurable prefix in single binary mode.
 	router := mux.NewRouter()
 	if registerRoutesExternally {
 		router = a.server.HTTP
@@ -305,7 +307,18 @@ func (a *API) RegisterQuerier(queryable storage.Queryable, engine *promql.Engine
 	a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/series", legacyPromHandler, true, "GET", "POST", "DELETE")
 	a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/metadata", legacyPromHandler, true, "GET")
 
-	return router
+	// if we have externally registered routes then we need to return the server handler
+	// so that we continue to use all standard middleware
+	if registerRoutesExternally {
+		return a.server.HTTPServer.Handler
+	}
+
+	// Since we have a new router and the request will not go through the default server
+	// HTTP middleware stack, we need to add a middleware to extract the trace context
+	// from the HTTP headers and inject it into the Go context.
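+	// Illustrative sketch (hypothetical handler code, not part of the upstream
+	// change): once this wrapping is in place, a handler can start a child span
+	// parented to the incoming trace via the request context, e.g.:
+	//
+	//	sp, ctx := opentracing.StartSpanFromContext(r.Context(), "doWork")
+	//	defer sp.Finish()
+	//	_ = ctx // pass ctx down so further spans nest under "doWork"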
+	return nethttp.MiddlewareFunc(opentracing.GlobalTracer(), router.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string {
+		return "internalQuerier"
+	}))
 }
 
 // RegisterQueryFrontend registers the Prometheus routes supported by the
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go
new file mode 100644
index 000000000000..a30dfbbbfd70
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go
@@ -0,0 +1,178 @@
+package openstack
+
+import (
+	"bytes"
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/ncw/swift"
+	thanos "github.com/thanos-io/thanos/pkg/objstore/swift"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+type SwiftObjectClient struct {
+	conn      *swift.Connection
+	cfg       SwiftConfig
+	delimiter rune
+}
+
+// SwiftConfig is config for the Swift Chunk Client.
+type SwiftConfig struct {
+	thanos.SwiftConfig `yaml:",inline"`
+}
+
+// RegisterFlags registers flags.
+func (cfg *SwiftConfig) RegisterFlags(f *flag.FlagSet) {
+	cfg.RegisterFlagsWithPrefix("", f)
+}
+
+// Validate validates the config and returns an error on failure.
+func (cfg *SwiftConfig) Validate() error {
+	return nil
+}
+
+// RegisterFlagsWithPrefix registers flags with prefix.
+func (cfg *SwiftConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+	f.StringVar(&cfg.ContainerName, prefix+"swift.container-name", "cortex", "Name of the Swift container to put chunks in.")
+	f.StringVar(&cfg.DomainName, prefix+"swift.domain-name", "", "Openstack user's domain name.")
+	f.StringVar(&cfg.DomainId, prefix+"swift.domain-id", "", "Openstack user's domain id.")
+	f.StringVar(&cfg.UserDomainName, prefix+"swift.user-domain-name", "", "Openstack user's domain name.")
+	f.StringVar(&cfg.UserDomainID, prefix+"swift.user-domain-id", "", "Openstack user's domain id.")
+	f.StringVar(&cfg.Username, prefix+"swift.username", "", "Openstack username for the api.")
+	f.StringVar(&cfg.UserId, prefix+"swift.user-id", "", "Openstack userid for the api.")
+	f.StringVar(&cfg.Password, prefix+"swift.password", "", "Openstack api key.")
+	f.StringVar(&cfg.AuthUrl, prefix+"swift.auth-url", "", "Openstack authentication URL.")
+	f.StringVar(&cfg.RegionName, prefix+"swift.region-name", "", "Openstack Region to use eg LON, ORD - default is to use the first region (v2,v3 auth only)")
+	f.StringVar(&cfg.ProjectName, prefix+"swift.project-name", "", "Openstack project name (v2,v3 auth only).")
+	f.StringVar(&cfg.ProjectID, prefix+"swift.project-id", "", "Openstack project id (v2,v3 auth only).")
+	f.StringVar(&cfg.ProjectDomainName, prefix+"swift.project-domain-name", "", "Name of the project's domain (v3 auth only), only needed if it differs from the user domain.")
+	f.StringVar(&cfg.ProjectDomainID, prefix+"swift.project-domain-id", "", "Id of the project's domain (v3 auth only), only needed if it differs from the user domain.")
+}
+
+// NewSwiftObjectClient makes a new chunk.Client that writes chunks to OpenStack Swift.
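+// A minimal, hedged usage sketch (the field values below are illustrative
+// placeholders, not defaults):
+//
+//	cfg := SwiftConfig{}
+//	cfg.AuthUrl = "http://keystone:5000/v3" // hypothetical Keystone endpoint
+//	cfg.Username = "loki"                   // hypothetical credentials
+//	cfg.Password = "secret"
+//	cfg.ContainerName = "cortex"
+//	client, err := NewSwiftObjectClient(cfg, chunk.DirDelim)
+//	if err != nil {
+//		// handle authentication or container-creation failure
+//	}
+//	defer client.Stop()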
+func NewSwiftObjectClient(cfg SwiftConfig, delimiter string) (*SwiftObjectClient, error) {
+	util.WarnExperimentalUse("OpenStack Swift Storage")
+
+	// Create a connection
+	c := &swift.Connection{
+		AuthUrl:  cfg.AuthUrl,
+		ApiKey:   cfg.Password,
+		UserName: cfg.Username,
+		UserId:   cfg.UserId,
+
+		TenantId:       cfg.ProjectID,
+		Tenant:         cfg.ProjectName,
+		TenantDomain:   cfg.ProjectDomainName,
+		TenantDomainId: cfg.ProjectDomainID,
+
+		Domain:   cfg.DomainName,
+		DomainId: cfg.DomainId,
+
+		Region: cfg.RegionName,
+	}
+
+	switch {
+	case cfg.UserDomainName != "":
+		c.Domain = cfg.UserDomainName
+	case cfg.UserDomainID != "":
+		c.DomainId = cfg.UserDomainID
+	}
+
+	if len(delimiter) > 1 {
+		return nil, fmt.Errorf("delimiter must be a single character but was %s", delimiter)
+	}
+	var delim rune
+	if len(delimiter) != 0 {
+		delim = []rune(delimiter)[0]
+	}
+
+	// Authenticate
+	err := c.Authenticate()
+	if err != nil {
+		return nil, err
+	}
+
+	// Ensure the container is created; no error is returned if it already exists.
+	if err := c.ContainerCreate(cfg.ContainerName, nil); err != nil {
+		return nil, err
+	}
+
+	return &SwiftObjectClient{
+		conn:      c,
+		cfg:       cfg,
+		delimiter: delim,
+	}, nil
+}
+
+func (s *SwiftObjectClient) Stop() {
+	s.conn.UnAuthenticate()
+}
+
+// GetObject returns a reader for the specified object key from the configured Swift container. If the
+// key does not exist a generic chunk.ErrStorageObjectNotFound error is returned.
+func (s *SwiftObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) {
+	var buf bytes.Buffer
+	_, err := s.conn.ObjectGet(s.cfg.ContainerName, objectKey, &buf, false, nil)
+	if err != nil {
+		if err == swift.ObjectNotFound {
+			return nil, chunk.ErrStorageObjectNotFound
+		}
+		return nil, err
+	}
+
+	return ioutil.NopCloser(&buf), nil
+}
+
+// PutObject puts the specified bytes into the configured Swift container at the provided key.
+func (s *SwiftObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+	_, err := s.conn.ObjectPut(s.cfg.ContainerName, objectKey, object, false, "", "", nil)
+	return err
+}
+
+// List lists objects and common prefixes under the given prefix, non-recursively.
+func (s *SwiftObjectClient) List(ctx context.Context, prefix string) ([]chunk.StorageObject, []chunk.StorageCommonPrefix, error) {
+	objs, err := s.conn.Objects(s.cfg.ContainerName, &swift.ObjectsOpts{
+		Prefix:    prefix,
+		Delimiter: s.delimiter,
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var storageObjects []chunk.StorageObject
+	var storagePrefixes []chunk.StorageCommonPrefix
+
+	for _, obj := range objs {
+		// Based on the docs, when SubDir is set it means the entry is a pseudo directory.
+		// See https://docs.openstack.org/swift/latest/api/pseudo-hierarchical-folders-directories.html
+		if obj.SubDir != "" {
+			storagePrefixes = append(storagePrefixes, chunk.StorageCommonPrefix(obj.SubDir))
+			continue
+		}
+
+		storageObjects = append(storageObjects, chunk.StorageObject{
+			Key:        obj.Name,
+			ModifiedAt: obj.LastModified,
+		})
+	}
+
+	return storageObjects, storagePrefixes, nil
+}
+
+// DeleteObject deletes the specified object key from the configured Swift container. If the
+// key does not exist a generic chunk.ErrStorageObjectNotFound error is returned.
+func (s *SwiftObjectClient) DeleteObject(ctx context.Context, objectKey string) error { + err := s.conn.ObjectDelete(s.cfg.ContainerName, objectKey) + if err == nil { + return nil + } + if err == swift.ObjectNotFound { + return chunk.ErrStorageObjectNotFound + } + return err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 99d53c5b15d4..30c91c5bae9c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -19,6 +19,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/gcp" "github.com/cortexproject/cortex/pkg/chunk/local" "github.com/cortexproject/cortex/pkg/chunk/objectclient" + "github.com/cortexproject/cortex/pkg/chunk/openstack" "github.com/cortexproject/cortex/pkg/chunk/purger" "github.com/cortexproject/cortex/pkg/util" ) @@ -65,6 +66,7 @@ type Config struct { CassandraStorageConfig cassandra.Config `yaml:"cassandra"` BoltDBConfig local.BoltDBConfig `yaml:"boltdb"` FSConfig local.FSConfig `yaml:"filesystem"` + Swift openstack.SwiftConfig `yaml:"swift"` IndexCacheValidity time.Duration `yaml:"index_cache_validity"` @@ -83,6 +85,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.BoltDBConfig.RegisterFlags(f) cfg.FSConfig.RegisterFlags(f) cfg.DeleteStoreConfig.RegisterFlags(f) + cfg.Swift.RegisterFlags(f) f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or tsdb. Be aware tsdb is experimental and shouldn't be used in production.") cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f) @@ -97,6 +100,9 @@ func (cfg *Config) Validate() error { if err := cfg.CassandraStorageConfig.Validate(); err != nil { return errors.Wrap(err, "invalid Cassandra Storage config") } + if err := cfg.Swift.Validate(); err != nil { + return errors.Wrap(err, "invalid Swift Storage config") + } return nil } @@ -221,6 +227,8 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcs": return newChunkClientFromStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig, chunk.DirDelim)) + case "swift": + return newChunkClientFromStore(openstack.NewSwiftObjectClient(cfg.Swift, chunk.DirDelim)) case "cassandra": return cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg) case "filesystem": @@ -290,6 +298,8 @@ func NewObjectClient(name string, cfg Config) (chunk.ObjectClient, error) { return gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig, chunk.DirDelim) case "azure": return azure.NewBlobStorage(&cfg.AzureStorageConfig, chunk.DirDelim) + case "swift": + return openstack.NewSwiftObjectClient(cfg.Swift, chunk.DirDelim) case "inmemory": return chunk.NewMockStorage(), nil case "filesystem": diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go index e79b102b13c1..b4d0fad84adb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go @@ -156,6 +156,9 @@ func (c *Config) Validate(log log.Logger) error { if err := c.Storage.Validate(); err != nil { return errors.Wrap(err, "invalid storage config") } + if err := c.Ruler.Validate(); err != nil { + return errors.Wrap(err, "invalid ruler 
config")
+	}
 	if err := c.TSDB.Validate(); err != nil {
 		return errors.Wrap(err, "invalid TSDB config")
 	}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
index 546dfa157846..224d2cc81590 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
@@ -14,6 +14,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/scrape"
 	"github.com/weaveworks/common/httpgrpc"
 	"github.com/weaveworks/common/instrument"
 	"github.com/weaveworks/common/user"
@@ -701,6 +702,42 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through
 	return result, nil
 }
 
+// MetricsMetadata returns all metric metadata of a user.
+func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) {
+	req := &ingester_client.MetricsMetadataRequest{}
+	// TODO: We only need to look in all the ingesters if shardByAllLabels is enabled.
+	// Look into distributor/query.go
+	resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) {
+		return client.MetricsMetadata(ctx, req)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	result := []scrape.MetricMetadata{}
+	dedupTracker := map[ingester_client.MetricMetadata]struct{}{}
+	for _, resp := range resps {
+		r := resp.(*client.MetricsMetadataResponse)
+		for _, m := range r.Metadata {
+			// Given we look across all ingesters - dedup the metadata.
+			_, ok := dedupTracker[*m]
+			if ok {
+				continue
+			}
+			dedupTracker[*m] = struct{}{}
+
+			result = append(result, scrape.MetricMetadata{
+				Metric: m.MetricName,
+				Help:   m.Help,
+				Unit:   m.Unit,
+				Type:   client.MetricMetadataMetricTypeToMetricType(m.GetType()),
+			})
+		}
+	}
+
+	return result, nil
+}
+
 // UserStats returns statistics about the current user.
 func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) {
 	req := &client.UserStatsRequest{}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
index bacf98117fba..723473ce75d9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
@@ -12,6 +12,7 @@ import (
 	jsoniter "github.com/json-iterator/go"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/textparse"
 
 	"github.com/cortexproject/cortex/pkg/util"
 )
@@ -138,6 +139,31 @@ func FromMetricsForLabelMatchersResponse(resp *MetricsForLabelMatchersResponse)
 	return metrics
 }
 
+// MetricMetadataMetricTypeToMetricType converts a metric type from our internal client
+// to a Prometheus one.
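+// For example (per the switch below), COUNTER maps to textparse.MetricTypeCounter,
+// and any unrecognized value falls back to textparse.MetricTypeUnknown:
+//
+//	t := MetricMetadataMetricTypeToMetricType(COUNTER) // == textparse.MetricTypeCounter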
+func MetricMetadataMetricTypeToMetricType(mt MetricMetadata_MetricType) textparse.MetricType { + switch mt { + case UNKNOWN: + return textparse.MetricTypeUnknown + case COUNTER: + return textparse.MetricTypeCounter + case GAUGE: + return textparse.MetricTypeGauge + case HISTOGRAM: + return textparse.MetricTypeHistogram + case GAUGEHISTOGRAM: + return textparse.MetricTypeGaugeHistogram + case SUMMARY: + return textparse.MetricTypeSummary + case INFO: + return textparse.MetricTypeInfo + case STATESET: + return textparse.MetricTypeStateset + default: + return textparse.MetricTypeUnknown + } +} + func toLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) { result := make([]*LabelMatcher, 0, len(matchers)) for _, matcher := range matchers { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go index 7441a4a7244b..027b6b2c97dc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go @@ -116,7 +116,7 @@ var MetricMetadata_MetricType_value = map[string]int32{ } func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{24, 0} + return fileDescriptor_893a47d0a749d749, []int{26, 0} } type WriteRequest struct { @@ -908,6 +908,84 @@ func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric { return nil } +type MetricsMetadataRequest struct { +} + +func (m *MetricsMetadataRequest) Reset() { *m = MetricsMetadataRequest{} } +func (*MetricsMetadataRequest) ProtoMessage() {} +func (*MetricsMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{17} +} +func (m *MetricsMetadataRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricsMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricsMetadataRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricsMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsMetadataRequest.Merge(m, src) +} +func (m *MetricsMetadataRequest) XXX_Size() int { + return m.Size() +} +func (m *MetricsMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsMetadataRequest proto.InternalMessageInfo + +type MetricsMetadataResponse struct { + Metadata []*MetricMetadata `protobuf:"bytes,1,rep,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (m *MetricsMetadataResponse) Reset() { *m = MetricsMetadataResponse{} } +func (*MetricsMetadataResponse) ProtoMessage() {} +func (*MetricsMetadataResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{18} +} +func (m *MetricsMetadataResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricsMetadataResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricsMetadataResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricsMetadataResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsMetadataResponse.Merge(m, src) +} +func (m 
*MetricsMetadataResponse) XXX_Size() int { + return m.Size() +} +func (m *MetricsMetadataResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsMetadataResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsMetadataResponse proto.InternalMessageInfo + +func (m *MetricsMetadataResponse) GetMetadata() []*MetricMetadata { + if m != nil { + return m.Metadata + } + return nil +} + type TimeSeriesChunk struct { FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` @@ -918,7 +996,7 @@ type TimeSeriesChunk struct { func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } func (*TimeSeriesChunk) ProtoMessage() {} func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{17} + return fileDescriptor_893a47d0a749d749, []int{19} } func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -978,7 +1056,7 @@ type Chunk struct { func (m *Chunk) Reset() { *m = Chunk{} } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{18} + return fileDescriptor_893a47d0a749d749, []int{20} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1041,7 +1119,7 @@ type TransferChunksResponse struct { func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } func (*TransferChunksResponse) ProtoMessage() {} func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{19} + return fileDescriptor_893a47d0a749d749, []int{21} } func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1079,7 +1157,7 @@ type TimeSeries struct { func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{20} + return fileDescriptor_893a47d0a749d749, []int{22} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1123,7 +1201,7 @@ type LabelPair struct { func (m *LabelPair) Reset() { *m = LabelPair{} } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{21} + return fileDescriptor_893a47d0a749d749, []int{23} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1174,7 +1252,7 @@ type Sample struct { func (m *Sample) Reset() { *m = Sample{} } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{22} + return fileDescriptor_893a47d0a749d749, []int{24} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1224,7 +1302,7 @@ type LabelMatchers struct { func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } func (*LabelMatchers) ProtoMessage() {} func (*LabelMatchers) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{23} + return fileDescriptor_893a47d0a749d749, []int{25} } func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1270,7 +1348,7 @@ type MetricMetadata struct { func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } func (*MetricMetadata) ProtoMessage() {} func (*MetricMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{24} + return 
fileDescriptor_893a47d0a749d749, []int{26} } func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1334,7 +1412,7 @@ type Metric struct { func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{25} + return fileDescriptor_893a47d0a749d749, []int{27} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1372,7 +1450,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{26} + return fileDescriptor_893a47d0a749d749, []int{28} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1432,7 +1510,7 @@ type TimeSeriesFile struct { func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } func (*TimeSeriesFile) ProtoMessage() {} func (*TimeSeriesFile) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{27} + return fileDescriptor_893a47d0a749d749, []int{29} } func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1495,7 +1573,7 @@ type TransferTSDBResponse struct { func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } func (*TransferTSDBResponse) ProtoMessage() {} func (*TransferTSDBResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{28} + return fileDescriptor_893a47d0a749d749, []int{30} } func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1545,6 +1623,8 @@ func init() { proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse") proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest") proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse") + proto.RegisterType((*MetricsMetadataRequest)(nil), "cortex.MetricsMetadataRequest") + proto.RegisterType((*MetricsMetadataResponse)(nil), "cortex.MetricsMetadataResponse") proto.RegisterType((*TimeSeriesChunk)(nil), "cortex.TimeSeriesChunk") proto.RegisterType((*Chunk)(nil), "cortex.Chunk") proto.RegisterType((*TransferChunksResponse)(nil), "cortex.TransferChunksResponse") @@ -1562,98 +1642,100 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1444 bytes of a gzipped FileDescriptorProto + // 1488 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6, - 0x16, 0x26, 0xad, 0x87, 0xad, 0x23, 0x5a, 0xa1, 0xc7, 0x8e, 0xe3, 0x28, 0xf7, 0x52, 0xc9, 0x00, - 0xc9, 0x35, 0xee, 0xbd, 0x51, 0x52, 0x07, 0x69, 0xbd, 0x68, 0x11, 0xc8, 0x89, 0xec, 0xa8, 0xb5, - 0x64, 0x67, 0x24, 0x35, 0x6d, 0x81, 0x42, 0xa0, 0xa5, 0xb1, 0x4d, 0x94, 0xa4, 0x14, 0x3e, 0x8a, - 0x7a, 0x51, 0xa0, 0x40, 0x97, 0x5d, 0x34, 0xcb, 0xfe, 0x84, 0xae, 0xbb, 0xe9, 0xbe, 0xab, 0x2c, - 0xb3, 0x0c, 0xba, 0x08, 0x1a, 0x79, 0xd3, 0x65, 0xd0, 0x5f, 0x50, 0xcc, 0x83, 0x14, 0xa9, 0xc8, - 0x68, 0xfa, 0xc8, 0x8e, 0x73, 0xce, 0x37, 0xdf, 0x9c, 0x39, 0xaf, 0x39, 0x04, 0xad, 0x3f, 0xf4, - 0x02, 0xfa, 0x45, 0x75, 0xe4, 0x0d, 0x83, 0x21, 0xca, 0x8b, 0x55, 0xf9, 0xfa, 0x91, 0x15, 0x1c, - 0x87, 0x07, 0xd5, 0xfe, 0xd0, 0xb9, 0x71, 0x34, 0x3c, 0x1a, 0xde, 0xe0, 0xea, 0x83, 0xf0, 0x90, - 0xaf, 0xf8, 0x82, 0x7f, 0x89, 0x6d, 0xf8, 0x37, 0x15, 
0xb4, 0x87, 0x9e, 0x15, 0x50, 0x42, 0x1f, - 0x85, 0xd4, 0x0f, 0x50, 0x0b, 0x20, 0xb0, 0x1c, 0xea, 0x53, 0xcf, 0xa2, 0xfe, 0x9a, 0x7a, 0x39, - 0xb3, 0x5e, 0xdc, 0x40, 0x55, 0x79, 0x54, 0xc7, 0x72, 0x68, 0x9b, 0x6b, 0xb6, 0xca, 0x4f, 0x9e, - 0x57, 0x94, 0x9f, 0x9f, 0x57, 0xd0, 0xbe, 0x47, 0x4d, 0xdb, 0x1e, 0xf6, 0x3b, 0xf1, 0x2e, 0x92, - 0x60, 0x40, 0xef, 0x40, 0xbe, 0x3d, 0x0c, 0xbd, 0x3e, 0x5d, 0x9b, 0xbb, 0xac, 0xae, 0x97, 0x36, - 0x2a, 0x11, 0x57, 0xf2, 0xd4, 0xaa, 0x80, 0xd4, 0xdd, 0xd0, 0x21, 0x79, 0x9f, 0x7f, 0xa3, 0x4d, - 0x58, 0x70, 0x68, 0x60, 0x0e, 0xcc, 0xc0, 0x5c, 0xcb, 0x70, 0x33, 0x56, 0xa3, 0xad, 0x4d, 0x1a, - 0x78, 0x56, 0xbf, 0x29, 0xb5, 0x5b, 0xd9, 0x27, 0xcf, 0x2b, 0x2a, 0x89, 0xd1, 0xb8, 0x02, 0x30, - 0xe1, 0x43, 0xf3, 0x90, 0xa9, 0xed, 0x37, 0x74, 0x05, 0x2d, 0x40, 0x96, 0x74, 0x77, 0xeb, 0xba, - 0x8a, 0xcf, 0xc1, 0xa2, 0x3c, 0xdd, 0x1f, 0x0d, 0x5d, 0x9f, 0xe2, 0xf7, 0xa0, 0x48, 0xa8, 0x39, - 0x88, 0x7c, 0x50, 0x85, 0xf9, 0x47, 0x61, 0xd2, 0x01, 0x2b, 0xd1, 0xc9, 0x0f, 0x42, 0xea, 0x9d, - 0x48, 0x18, 0x89, 0x40, 0xf8, 0x0e, 0x68, 0x62, 0xbb, 0xa0, 0x43, 0x37, 0x60, 0xde, 0xa3, 0x7e, - 0x68, 0x07, 0xd1, 0xfe, 0xf3, 0x53, 0xfb, 0x05, 0x8e, 0x44, 0x28, 0xfc, 0x9d, 0x0a, 0x5a, 0x92, - 0x1a, 0xfd, 0x1f, 0x90, 0x1f, 0x98, 0x5e, 0xd0, 0xe3, 0x9e, 0x0c, 0x4c, 0x67, 0xd4, 0x73, 0x18, - 0x99, 0xba, 0x9e, 0x21, 0x3a, 0xd7, 0x74, 0x22, 0x45, 0xd3, 0x47, 0xeb, 0xa0, 0x53, 0x77, 0x90, - 0xc6, 0xce, 0x71, 0x6c, 0x89, 0xba, 0x83, 0x24, 0xf2, 0x26, 0x2c, 0x38, 0x66, 0xd0, 0x3f, 0xa6, - 0x9e, 0x2f, 0x9d, 0x1a, 0x5f, 0x6d, 0xd7, 0x3c, 0xa0, 0x76, 0x53, 0x28, 0x49, 0x8c, 0xc2, 0x0d, - 0x58, 0x4c, 0x19, 0x8d, 0x36, 0x5f, 0x33, 0x41, 0x58, 0x54, 0x94, 0x64, 0x2a, 0xe0, 0xc7, 0x2a, - 0x2c, 0x73, 0xae, 0x76, 0xe0, 0x51, 0xd3, 0x89, 0x19, 0xef, 0x40, 0xb1, 0x7f, 0x1c, 0xba, 0x9f, - 0xa5, 0x28, 0x2f, 0xbc, 0x4a, 0x79, 0x97, 0x81, 0x24, 0x6f, 0x72, 0xc7, 0x94, 0x49, 0x73, 0x7f, - 0xc2, 0xa4, 0x5b, 0x80, 0xf8, 0xbd, 0x3f, 0x34, 0xed, 0x90, 0xfa, 0x91, 0xf7, 0xff, 0x0d, 0x60, - 0x33, 0x69, 0xcf, 0x35, 0x1d, 0xca, 0xbd, 0x5e, 0x20, 0x05, 0x2e, 0x69, 0x99, 0x0e, 0xc5, 0x9b, - 0xb0, 0x9c, 0xda, 0x24, 0xaf, 0x71, 0x05, 0x34, 0xb1, 0xeb, 0x73, 0x2e, 0xe7, 0xf7, 0x28, 0x90, - 0xa2, 0x3d, 0x81, 0xe2, 0x65, 0x58, 0xda, 0x8d, 0x68, 0xa2, 0xd3, 0xf0, 0x6d, 0x69, 0x83, 0x14, - 0x4a, 0xb6, 0x0a, 0x14, 0x27, 0x36, 0x44, 0x64, 0x10, 0x1b, 0xe1, 0x63, 0x04, 0x7a, 0xd7, 0xa7, - 0x5e, 0x3b, 0x30, 0x83, 0x98, 0xea, 0x47, 0x15, 0x96, 0x12, 0x42, 0x49, 0x75, 0x15, 0x4a, 0x96, - 0x7b, 0x44, 0xfd, 0xc0, 0x1a, 0xba, 0x3d, 0xcf, 0x0c, 0xc4, 0x95, 0x54, 0xb2, 0x18, 0x4b, 0x89, - 0x19, 0x50, 0x76, 0x6b, 0x37, 0x74, 0x7a, 0xb1, 0x17, 0xd5, 0xf5, 0x2c, 0x29, 0xb8, 0xa1, 0x23, - 0x9c, 0xc7, 0x52, 0xd2, 0x1c, 0x59, 0xbd, 0x29, 0xa6, 0x0c, 0x67, 0xd2, 0xcd, 0x91, 0xd5, 0x48, - 0x91, 0x55, 0x61, 0xd9, 0x0b, 0x6d, 0x3a, 0x0d, 0xcf, 0x72, 0xf8, 0x12, 0x53, 0xa5, 0xf0, 0xf8, - 0x53, 0x58, 0x66, 0x86, 0x37, 0xee, 0xa5, 0x4d, 0xbf, 0x00, 0xf3, 0xa1, 0x4f, 0xbd, 0x9e, 0x35, - 0x90, 0x61, 0xc8, 0xb3, 0x65, 0x63, 0x80, 0xae, 0x43, 0x96, 0x77, 0x06, 0x66, 0x66, 0x71, 0xe3, - 0x62, 0x14, 0xec, 0x57, 0x2e, 0x4f, 0x38, 0x0c, 0xef, 0x00, 0x62, 0x2a, 0x3f, 0xcd, 0xfe, 0x16, - 0xe4, 0x7c, 0x26, 0x90, 0x29, 0x77, 0x29, 0xc9, 0x32, 0x65, 0x09, 0x11, 0x48, 0xfc, 0x83, 0x0a, - 0x86, 0x68, 0x3f, 0xfe, 0xf6, 0xd0, 0x4b, 0xd6, 0x8c, 0xff, 0xa6, 0x6b, 0x77, 0x13, 0xb4, 0xa8, - 0x2a, 0x7b, 0x3e, 0x0d, 0x64, 0xfd, 0x9e, 0x9f, 0x55, 0xbf, 0x3e, 0x29, 0x46, 0xd0, 0x36, 0x0d, - 0x70, 0x03, 0x2a, 0x67, 0xda, 0x2c, 0x5d, 0x71, 0x0d, 0xf2, 0x0e, 0x87, 0x48, 
0x5f, 0x94, 0xd2, - 0xbd, 0x96, 0x48, 0x2d, 0xfe, 0x49, 0x85, 0x73, 0x53, 0x15, 0xc9, 0xae, 0x70, 0xe8, 0x0d, 0x1d, - 0x19, 0xeb, 0x64, 0xb4, 0x4a, 0x4c, 0xde, 0x90, 0xe2, 0xc6, 0x20, 0x19, 0xce, 0xb9, 0x54, 0x38, - 0xef, 0x40, 0x9e, 0xa7, 0x76, 0xd4, 0x95, 0x96, 0x52, 0xb7, 0xda, 0x37, 0x2d, 0x6f, 0x6b, 0x45, - 0x3e, 0x38, 0x1a, 0x17, 0xd5, 0x06, 0xe6, 0x28, 0xa0, 0x1e, 0x91, 0xdb, 0xd0, 0xff, 0x20, 0x2f, - 0x3a, 0xc2, 0x5a, 0x96, 0x13, 0x2c, 0x46, 0x04, 0xc9, 0xa6, 0x21, 0x21, 0xf8, 0x5b, 0x15, 0x72, - 0xc2, 0xf4, 0x37, 0x15, 0xab, 0x32, 0x2c, 0x50, 0xb7, 0x3f, 0x1c, 0x58, 0xee, 0x11, 0x2f, 0x91, - 0x1c, 0x89, 0xd7, 0x08, 0xc9, 0xd4, 0x65, 0xb5, 0xa0, 0xc9, 0xfc, 0x5c, 0x83, 0xd5, 0x8e, 0x67, - 0xba, 0xfe, 0x21, 0xf5, 0xb8, 0x61, 0x71, 0x60, 0xf0, 0x97, 0x00, 0x13, 0x7f, 0x27, 0xfc, 0xa4, - 0xfe, 0x35, 0x3f, 0x55, 0x61, 0xde, 0x37, 0x9d, 0x91, 0x1d, 0xf7, 0xc9, 0x38, 0xd0, 0x6d, 0x2e, - 0x96, 0x9e, 0x8a, 0x40, 0xf8, 0x36, 0x14, 0x62, 0x6a, 0x66, 0x79, 0xdc, 0x11, 0x35, 0xc2, 0xbf, - 0xd1, 0x0a, 0xe4, 0x78, 0xbf, 0xe3, 0x8e, 0xd0, 0x88, 0x58, 0xe0, 0x1a, 0xe4, 0x05, 0xdf, 0x44, - 0x2f, 0x7a, 0x8e, 0x58, 0xb0, 0x5e, 0x39, 0xc3, 0x8b, 0xc5, 0x60, 0xe2, 0x42, 0x5c, 0x83, 0xc5, - 0x54, 0xaa, 0xa6, 0xde, 0x2e, 0xf5, 0xb5, 0xde, 0xae, 0x6f, 0xe6, 0xa0, 0x94, 0x9e, 0x15, 0xd0, - 0x6d, 0xc8, 0x06, 0x27, 0x23, 0x61, 0x4d, 0x69, 0xe3, 0xca, 0xec, 0x89, 0x42, 0x2e, 0x3b, 0x27, - 0x23, 0x4a, 0x38, 0x9c, 0x75, 0x63, 0x51, 0x00, 0xe2, 0x49, 0x10, 0xc9, 0x0b, 0x42, 0xc4, 0xda, - 0x31, 0x73, 0xcd, 0x31, 0xb5, 0x47, 0x3c, 0xa8, 0x05, 0xc2, 0xbf, 0x99, 0x2c, 0x74, 0xad, 0x60, - 0x2d, 0x27, 0x64, 0xec, 0x1b, 0x9f, 0x00, 0x4c, 0xc8, 0x51, 0x11, 0xe6, 0xbb, 0xad, 0x0f, 0x5a, - 0x7b, 0x0f, 0x5b, 0xba, 0xc2, 0x16, 0x77, 0xf7, 0xba, 0xad, 0x4e, 0x9d, 0xe8, 0x2a, 0x2a, 0x40, - 0x6e, 0xa7, 0xd6, 0xdd, 0xa9, 0xeb, 0x73, 0x68, 0x11, 0x0a, 0xf7, 0x1b, 0xed, 0xce, 0xde, 0x0e, - 0xa9, 0x35, 0xf5, 0x0c, 0x42, 0x50, 0xe2, 0x9a, 0x89, 0x2c, 0xcb, 0xb6, 0xb6, 0xbb, 0xcd, 0x66, - 0x8d, 0x7c, 0xac, 0xe7, 0xd8, 0x9c, 0xd3, 0x68, 0x6d, 0xef, 0xe9, 0x79, 0xa4, 0xc1, 0x42, 0xbb, - 0x53, 0xeb, 0xd4, 0xdb, 0xf5, 0x8e, 0x3e, 0x8f, 0x1b, 0x90, 0x17, 0x47, 0xff, 0xed, 0x2c, 0xc2, - 0x3d, 0xd0, 0x92, 0x2e, 0x47, 0x57, 0x53, 0x5e, 0x8d, 0xe9, 0xb8, 0x3a, 0xe1, 0xc5, 0x28, 0x7f, - 0x84, 0xfb, 0xa6, 0xf2, 0x27, 0xc3, 0x85, 0x32, 0x7f, 0xbe, 0x56, 0xa1, 0x34, 0x49, 0xfb, 0x6d, - 0xcb, 0xa6, 0xff, 0x44, 0x97, 0x29, 0xc3, 0xc2, 0xa1, 0x65, 0x53, 0x6e, 0x83, 0x38, 0x2e, 0x5e, - 0xcf, 0xac, 0xca, 0x55, 0x58, 0x89, 0xaa, 0xb2, 0xd3, 0xbe, 0xb7, 0x15, 0xd5, 0xe4, 0x7f, 0xdf, - 0x87, 0x42, 0x7c, 0x35, 0x16, 0xa9, 0xfa, 0x83, 0x6e, 0x6d, 0x57, 0x57, 0x58, 0xa4, 0x5a, 0x7b, - 0x9d, 0x9e, 0x58, 0xaa, 0xe8, 0x1c, 0x14, 0x49, 0x7d, 0xa7, 0xfe, 0x51, 0xaf, 0x59, 0xeb, 0xdc, - 0xbd, 0xaf, 0xcf, 0xb1, 0xd0, 0x09, 0x41, 0x6b, 0x4f, 0xca, 0x32, 0x1b, 0xa7, 0x39, 0x58, 0x88, - 0x6c, 0x67, 0xd9, 0xb9, 0x1f, 0xfa, 0xc7, 0x68, 0x65, 0xd6, 0x90, 0x5c, 0x3e, 0x3f, 0x25, 0x95, - 0x1d, 0x42, 0x41, 0x6f, 0x43, 0x8e, 0xcf, 0x55, 0x68, 0xe6, 0x9c, 0x5a, 0x9e, 0x3d, 0x7d, 0x62, - 0x05, 0xdd, 0x83, 0x62, 0x62, 0x1e, 0x3b, 0x63, 0xf7, 0xa5, 0x94, 0x34, 0x3d, 0xba, 0x61, 0xe5, - 0xa6, 0x8a, 0xee, 0x43, 0x31, 0x31, 0x0e, 0xa1, 0x72, 0x2a, 0x99, 0x52, 0x83, 0xd5, 0x84, 0x6b, - 0xc6, 0xfc, 0x84, 0x15, 0x54, 0x07, 0x98, 0x4c, 0x42, 0xe8, 0x62, 0x0a, 0x9c, 0x1c, 0x99, 0xca, - 0xe5, 0x59, 0xaa, 0x98, 0x66, 0x0b, 0x0a, 0xf1, 0x1c, 0x80, 0xd6, 0x66, 0x8c, 0x06, 0x82, 0xe4, - 0xec, 0xa1, 0x01, 0x2b, 0x68, 0x1b, 0xb4, 0x9a, 0x6d, 0xbf, 0x0e, 0x4d, 0x39, 0xa9, 0xf1, 0xa7, - 0x79, 
0x6c, 0xb8, 0x70, 0xc6, 0xd3, 0x8b, 0xae, 0xa5, 0x9b, 0xcf, 0x59, 0xf3, 0x44, 0xf9, 0x3f, - 0x7f, 0x88, 0x8b, 0x4f, 0x6b, 0x42, 0x29, 0xfd, 0x8c, 0xa0, 0xb3, 0xc6, 0xe8, 0xb2, 0x11, 0x2b, - 0x66, 0xbf, 0x3b, 0xca, 0x3a, 0x8b, 0xac, 0x96, 0xcc, 0x7f, 0xb4, 0xfa, 0x2a, 0x19, 0x2b, 0xcd, - 0xf2, 0xbf, 0xa6, 0xb9, 0x92, 0xd5, 0xc2, 0x98, 0xb6, 0xde, 0x7d, 0xfa, 0xc2, 0x50, 0x9e, 0xbd, - 0x30, 0x94, 0x97, 0x2f, 0x0c, 0xf5, 0xab, 0xb1, 0xa1, 0x7e, 0x3f, 0x36, 0xd4, 0x27, 0x63, 0x43, - 0x7d, 0x3a, 0x36, 0xd4, 0x5f, 0xc6, 0x86, 0xfa, 0xeb, 0xd8, 0x50, 0x5e, 0x8e, 0x0d, 0xf5, 0xf1, - 0xa9, 0xa1, 0x3c, 0x3d, 0x35, 0x94, 0x67, 0xa7, 0x86, 0xf2, 0x49, 0xbe, 0x6f, 0x5b, 0xd4, 0x0d, - 0x0e, 0xf2, 0xfc, 0x5f, 0xf5, 0xd6, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0xa2, 0xf3, 0x2b, - 0xf2, 0x0e, 0x00, 0x00, + 0x16, 0x26, 0xad, 0x87, 0xad, 0x23, 0x59, 0xa6, 0xc7, 0x8e, 0xa3, 0x28, 0xf7, 0x52, 0xc9, 0x00, + 0xc9, 0x35, 0xee, 0xbd, 0x71, 0x52, 0x07, 0x69, 0xbd, 0x68, 0x11, 0xc8, 0x89, 0xec, 0xa8, 0xb5, + 0x64, 0x67, 0x24, 0x37, 0x6d, 0x81, 0x42, 0xa0, 0xa5, 0xb1, 0x4d, 0x94, 0xa4, 0x14, 0x3e, 0x8a, + 0x7a, 0x51, 0xa0, 0x40, 0x97, 0x5d, 0x34, 0xcb, 0xfe, 0x84, 0xae, 0xbb, 0x29, 0xba, 0xed, 0x2a, + 0xcb, 0x2c, 0x83, 0x2e, 0x82, 0xc6, 0xd9, 0x74, 0x19, 0xf4, 0x17, 0x14, 0xf3, 0x20, 0x45, 0xd2, + 0x52, 0x9b, 0x3e, 0xb2, 0xe3, 0x9c, 0xc7, 0x37, 0x67, 0xbe, 0x39, 0xe7, 0xcc, 0x21, 0x94, 0xfa, + 0x43, 0xd7, 0xa7, 0x9f, 0xad, 0x8d, 0xdc, 0xa1, 0x3f, 0x44, 0x79, 0xb1, 0xaa, 0x5e, 0x3b, 0x32, + 0xfd, 0xe3, 0xe0, 0x60, 0xad, 0x3f, 0xb4, 0xaf, 0x1f, 0x0d, 0x8f, 0x86, 0xd7, 0xb9, 0xfa, 0x20, + 0x38, 0xe4, 0x2b, 0xbe, 0xe0, 0x5f, 0xc2, 0x0d, 0xff, 0xaa, 0x42, 0xe9, 0x81, 0x6b, 0xfa, 0x94, + 0xd0, 0x87, 0x01, 0xf5, 0x7c, 0xd4, 0x06, 0xf0, 0x4d, 0x9b, 0x7a, 0xd4, 0x35, 0xa9, 0x57, 0x51, + 0x2f, 0x65, 0x56, 0x8b, 0xeb, 0x68, 0x4d, 0x6e, 0xd5, 0x35, 0x6d, 0xda, 0xe1, 0x9a, 0xcd, 0xea, + 0xe3, 0x67, 0x35, 0xe5, 0xa7, 0x67, 0x35, 0xb4, 0xe7, 0x52, 0xc3, 0xb2, 0x86, 0xfd, 0x6e, 0xe4, + 0x45, 0x62, 0x08, 0xe8, 0x2d, 0xc8, 0x77, 0x86, 0x81, 0xdb, 0xa7, 0x95, 0x99, 0x4b, 0xea, 0x6a, + 0x79, 0xbd, 0x16, 0x62, 0xc5, 0x77, 0x5d, 0x13, 0x26, 0x0d, 0x27, 0xb0, 0x49, 0xde, 0xe3, 0xdf, + 0x68, 0x03, 0xe6, 0x6c, 0xea, 0x1b, 0x03, 0xc3, 0x37, 0x2a, 0x19, 0x1e, 0xc6, 0x4a, 0xe8, 0xda, + 0xa2, 0xbe, 0x6b, 0xf6, 0x5b, 0x52, 0xbb, 0x99, 0x7d, 0xfc, 0xac, 0xa6, 0x92, 0xc8, 0x1a, 0xd7, + 0x00, 0xc6, 0x78, 0x68, 0x16, 0x32, 0xf5, 0xbd, 0xa6, 0xa6, 0xa0, 0x39, 0xc8, 0x92, 0xfd, 0x9d, + 0x86, 0xa6, 0xe2, 0x05, 0x98, 0x97, 0xbb, 0x7b, 0xa3, 0xa1, 0xe3, 0x51, 0xfc, 0x0e, 0x14, 0x09, + 0x35, 0x06, 0x21, 0x07, 0x6b, 0x30, 0xfb, 0x30, 0x88, 0x13, 0xb0, 0x1c, 0xee, 0x7c, 0x3f, 0xa0, + 0xee, 0x89, 0x34, 0x23, 0xa1, 0x11, 0xbe, 0x0d, 0x25, 0xe1, 0x2e, 0xe0, 0xd0, 0x75, 0x98, 0x75, + 0xa9, 0x17, 0x58, 0x7e, 0xe8, 0x7f, 0x2e, 0xe5, 0x2f, 0xec, 0x48, 0x68, 0x85, 0xbf, 0x51, 0xa1, + 0x14, 0x87, 0x46, 0xff, 0x07, 0xe4, 0xf9, 0x86, 0xeb, 0xf7, 0x38, 0x93, 0xbe, 0x61, 0x8f, 0x7a, + 0x36, 0x03, 0x53, 0x57, 0x33, 0x44, 0xe3, 0x9a, 0x6e, 0xa8, 0x68, 0x79, 0x68, 0x15, 0x34, 0xea, + 0x0c, 0x92, 0xb6, 0x33, 0xdc, 0xb6, 0x4c, 0x9d, 0x41, 0xdc, 0xf2, 0x06, 0xcc, 0xd9, 0x86, 0xdf, + 0x3f, 0xa6, 0xae, 0x27, 0x49, 0x8d, 0x8e, 0xb6, 0x63, 0x1c, 0x50, 0xab, 0x25, 0x94, 0x24, 0xb2, + 0xc2, 0x4d, 0x98, 0x4f, 0x04, 0x8d, 0x36, 0x5e, 0x31, 0x41, 0xd8, 0xad, 0x28, 0xf1, 0x54, 0xc0, + 0x8f, 0x54, 0x58, 0xe2, 0x58, 0x1d, 0xdf, 0xa5, 0x86, 0x1d, 0x21, 0xde, 0x86, 0x62, 0xff, 0x38, + 0x70, 0x3e, 0x49, 0x40, 0x9e, 0x3f, 0x0b, 0x79, 0x87, 0x19, 0x49, 0xdc, 0xb8, 0x47, 0x2a, 0xa4, + 0x99, 
0x3f, 0x11, 0xd2, 0x4d, 0x40, 0xfc, 0xdc, 0xef, 0x1b, 0x56, 0x40, 0xbd, 0x90, 0xfd, 0x7f, + 0x03, 0x58, 0x4c, 0xda, 0x73, 0x0c, 0x9b, 0x72, 0xd6, 0x0b, 0xa4, 0xc0, 0x25, 0x6d, 0xc3, 0xa6, + 0x78, 0x03, 0x96, 0x12, 0x4e, 0xf2, 0x18, 0x97, 0xa1, 0x24, 0xbc, 0x3e, 0xe5, 0x72, 0x7e, 0x8e, + 0x02, 0x29, 0x5a, 0x63, 0x53, 0xbc, 0x04, 0x8b, 0x3b, 0x21, 0x4c, 0xb8, 0x1b, 0xbe, 0x25, 0x63, + 0x90, 0x42, 0x89, 0x56, 0x83, 0xe2, 0x38, 0x86, 0x10, 0x0c, 0xa2, 0x20, 0x3c, 0x8c, 0x40, 0xdb, + 0xf7, 0xa8, 0xdb, 0xf1, 0x0d, 0x3f, 0x82, 0xfa, 0x5e, 0x85, 0xc5, 0x98, 0x50, 0x42, 0x5d, 0x81, + 0xb2, 0xe9, 0x1c, 0x51, 0xcf, 0x37, 0x87, 0x4e, 0xcf, 0x35, 0x7c, 0x71, 0x24, 0x95, 0xcc, 0x47, + 0x52, 0x62, 0xf8, 0x94, 0x9d, 0xda, 0x09, 0xec, 0x5e, 0xc4, 0xa2, 0xba, 0x9a, 0x25, 0x05, 0x27, + 0xb0, 0x05, 0x79, 0x2c, 0x25, 0x8d, 0x91, 0xd9, 0x4b, 0x21, 0x65, 0x38, 0x92, 0x66, 0x8c, 0xcc, + 0x66, 0x02, 0x6c, 0x0d, 0x96, 0xdc, 0xc0, 0xa2, 0x69, 0xf3, 0x2c, 0x37, 0x5f, 0x64, 0xaa, 0x84, + 0x3d, 0xfe, 0x18, 0x96, 0x58, 0xe0, 0xcd, 0xbb, 0xc9, 0xd0, 0xcf, 0xc3, 0x6c, 0xe0, 0x51, 0xb7, + 0x67, 0x0e, 0xe4, 0x35, 0xe4, 0xd9, 0xb2, 0x39, 0x40, 0xd7, 0x20, 0xcb, 0x3b, 0x03, 0x0b, 0xb3, + 0xb8, 0x7e, 0x21, 0xbc, 0xec, 0x33, 0x87, 0x27, 0xdc, 0x0c, 0x6f, 0x03, 0x62, 0x2a, 0x2f, 0x89, + 0xfe, 0x06, 0xe4, 0x3c, 0x26, 0x90, 0x29, 0x77, 0x31, 0x8e, 0x92, 0x8a, 0x84, 0x08, 0x4b, 0xfc, + 0x9d, 0x0a, 0xba, 0x68, 0x3f, 0xde, 0xd6, 0xd0, 0x8d, 0xd7, 0x8c, 0xf7, 0xba, 0x6b, 0x77, 0x03, + 0x4a, 0x61, 0x55, 0xf6, 0x3c, 0xea, 0xcb, 0xfa, 0x3d, 0x37, 0xa9, 0x7e, 0x3d, 0x52, 0x0c, 0x4d, + 0x3b, 0xd4, 0xc7, 0x4d, 0xa8, 0x4d, 0x8d, 0x59, 0x52, 0x71, 0x15, 0xf2, 0x36, 0x37, 0x91, 0x5c, + 0x94, 0x93, 0xbd, 0x96, 0x48, 0x2d, 0xae, 0xc0, 0x8a, 0x84, 0x0a, 0xdb, 0x6f, 0x98, 0x7b, 0x2d, + 0x38, 0x7f, 0x46, 0x23, 0xc1, 0xd7, 0x63, 0xad, 0x5c, 0xfd, 0xbd, 0x56, 0x1e, 0x6b, 0xe2, 0x3f, + 0xaa, 0xb0, 0x90, 0x2a, 0x7d, 0xc6, 0xd5, 0xa1, 0x3b, 0xb4, 0x65, 0x52, 0xc5, 0xd3, 0xa2, 0xcc, + 0xe4, 0x4d, 0x29, 0x6e, 0x0e, 0xe2, 0x79, 0x33, 0x93, 0xc8, 0x9b, 0xdb, 0x90, 0xe7, 0x35, 0x14, + 0xb6, 0xbf, 0xc5, 0x04, 0x7d, 0x7b, 0x86, 0xe9, 0x6e, 0x2e, 0xcb, 0x97, 0xad, 0xc4, 0x45, 0xf5, + 0x81, 0x31, 0xf2, 0xa9, 0x4b, 0xa4, 0x1b, 0xfa, 0x1f, 0xe4, 0x45, 0xeb, 0xa9, 0x64, 0x39, 0xc0, + 0x7c, 0x08, 0x10, 0xef, 0x4e, 0xd2, 0x04, 0x7f, 0xad, 0x42, 0x4e, 0x84, 0xfe, 0xba, 0x92, 0xa2, + 0x0a, 0x73, 0xd4, 0xe9, 0x0f, 0x07, 0xa6, 0x73, 0xc4, 0x6b, 0x31, 0x47, 0xa2, 0x35, 0x42, 0xb2, + 0x46, 0x58, 0xd1, 0x95, 0x64, 0x21, 0x54, 0x60, 0xa5, 0xeb, 0x1a, 0x8e, 0x77, 0x48, 0x5d, 0x1e, + 0x58, 0x94, 0x01, 0xf8, 0x73, 0x80, 0x31, 0xdf, 0x31, 0x9e, 0xd4, 0xbf, 0xc6, 0xd3, 0x1a, 0xcc, + 0x7a, 0x86, 0x3d, 0xb2, 0xa2, 0x86, 0x1c, 0x65, 0x54, 0x87, 0x8b, 0x25, 0x53, 0xa1, 0x11, 0xbe, + 0x05, 0x85, 0x08, 0x9a, 0x45, 0x1e, 0xb5, 0xde, 0x12, 0xe1, 0xdf, 0x68, 0x19, 0x72, 0xbc, 0xb1, + 0x72, 0x22, 0x4a, 0x44, 0x2c, 0x70, 0x1d, 0xf2, 0x02, 0x6f, 0xac, 0x17, 0xcd, 0x4d, 0x2c, 0x58, + 0x53, 0x9e, 0xc0, 0x62, 0xd1, 0x1f, 0x53, 0x88, 0xeb, 0x30, 0x9f, 0xa8, 0x89, 0xc4, 0x23, 0xa9, + 0xbe, 0xd2, 0x23, 0xf9, 0xd5, 0x0c, 0x94, 0x93, 0x99, 0x8c, 0x6e, 0x41, 0xd6, 0x3f, 0x19, 0x89, + 0x68, 0xca, 0xeb, 0x97, 0x27, 0xe7, 0xbb, 0x5c, 0x76, 0x4f, 0x46, 0x94, 0x70, 0x73, 0xd6, 0xf6, + 0x45, 0xa5, 0x89, 0xb7, 0x47, 0x24, 0x2f, 0x08, 0x11, 0xeb, 0xfb, 0x8c, 0x9a, 0x63, 0x6a, 0x8d, + 0xf8, 0xa5, 0x16, 0x08, 0xff, 0x66, 0xb2, 0xc0, 0x31, 0xfd, 0x4a, 0x4e, 0xc8, 0xd8, 0x37, 0x3e, + 0x01, 0x18, 0x83, 0xa3, 0x22, 0xcc, 0xee, 0xb7, 0xdf, 0x6b, 0xef, 0x3e, 0x68, 0x6b, 0x0a, 0x5b, + 0xdc, 0xd9, 0xdd, 0x6f, 0x77, 
0x1b, 0x44, 0x53, 0x51, 0x01, 0x72, 0xdb, 0xf5, 0xfd, 0xed, 0x86, + 0x36, 0x83, 0xe6, 0xa1, 0x70, 0xaf, 0xd9, 0xe9, 0xee, 0x6e, 0x93, 0x7a, 0x4b, 0xcb, 0x20, 0x04, + 0x65, 0xae, 0x19, 0xcb, 0xb2, 0xcc, 0xb5, 0xb3, 0xdf, 0x6a, 0xd5, 0xc9, 0x87, 0x5a, 0x8e, 0x0d, + 0x54, 0xcd, 0xf6, 0xd6, 0xae, 0x96, 0x47, 0x25, 0x98, 0xeb, 0x74, 0xeb, 0xdd, 0x46, 0xa7, 0xd1, + 0xd5, 0x66, 0x71, 0x13, 0xf2, 0x62, 0xeb, 0xbf, 0x9d, 0x45, 0xb8, 0x07, 0xa5, 0x38, 0xe5, 0xe8, + 0x4a, 0x82, 0xd5, 0x08, 0x8e, 0xab, 0x63, 0x2c, 0x86, 0xf9, 0x23, 0xe8, 0x4b, 0xe5, 0x4f, 0x86, + 0x0b, 0x65, 0xfe, 0x7c, 0xa9, 0x42, 0x79, 0x9c, 0xf6, 0x5b, 0xa6, 0x45, 0xff, 0x89, 0x2e, 0x53, + 0x85, 0xb9, 0x43, 0xd3, 0xa2, 0x3c, 0x06, 0xb1, 0x5d, 0xb4, 0x9e, 0x58, 0x95, 0x2b, 0xb0, 0x1c, + 0x56, 0x65, 0xb7, 0x73, 0x77, 0x33, 0xac, 0xc9, 0xff, 0xbe, 0x0b, 0x85, 0xe8, 0x68, 0xec, 0xa6, + 0x1a, 0xf7, 0xf7, 0xeb, 0x3b, 0x9a, 0xc2, 0x6e, 0xaa, 0xbd, 0xdb, 0xed, 0x89, 0xa5, 0x8a, 0x16, + 0xa0, 0x48, 0x1a, 0xdb, 0x8d, 0x0f, 0x7a, 0xad, 0x7a, 0xf7, 0xce, 0x3d, 0x6d, 0x86, 0x5d, 0x9d, + 0x10, 0xb4, 0x77, 0xa5, 0x2c, 0xb3, 0xfe, 0x43, 0x1e, 0xe6, 0xc2, 0xd8, 0x59, 0x76, 0xee, 0x05, + 0xde, 0x31, 0x5a, 0x9e, 0x34, 0x8d, 0x57, 0xcf, 0xa5, 0xa4, 0xb2, 0x43, 0x28, 0xe8, 0x4d, 0xc8, + 0xf1, 0x01, 0x0e, 0x4d, 0x1c, 0x88, 0xab, 0x93, 0xc7, 0x5c, 0xac, 0xa0, 0xbb, 0x50, 0x8c, 0x0d, + 0x7e, 0x53, 0xbc, 0x2f, 0x26, 0xa4, 0xc9, 0x19, 0x11, 0x2b, 0x37, 0x54, 0x74, 0x0f, 0x8a, 0xb1, + 0xb9, 0x0b, 0x55, 0x13, 0xc9, 0x94, 0x98, 0xe0, 0xc6, 0x58, 0x13, 0x06, 0x35, 0xac, 0xa0, 0x06, + 0xc0, 0x78, 0xe4, 0x42, 0x17, 0x12, 0xc6, 0xf1, 0xd9, 0xac, 0x5a, 0x9d, 0xa4, 0x8a, 0x60, 0x36, + 0xa1, 0x10, 0x0d, 0x1c, 0xa8, 0x32, 0x61, 0x06, 0x11, 0x20, 0xd3, 0xa7, 0x13, 0xac, 0xa0, 0x2d, + 0x28, 0xd5, 0x2d, 0xeb, 0x55, 0x60, 0xaa, 0x71, 0x8d, 0x97, 0xc6, 0xb1, 0xa2, 0xe7, 0x37, 0xfd, + 0xc6, 0xa3, 0xab, 0xc9, 0xe6, 0x33, 0x6d, 0x70, 0xa9, 0xfe, 0xe7, 0x0f, 0xed, 0xa2, 0xdd, 0xba, + 0xb0, 0x90, 0x7a, 0xec, 0x91, 0x9e, 0xf2, 0x4e, 0xcd, 0x07, 0xd5, 0xda, 0x54, 0x7d, 0x84, 0xda, + 0x82, 0x72, 0xf2, 0x71, 0x42, 0xd3, 0xfe, 0x02, 0xaa, 0xd1, 0x6e, 0x53, 0x5e, 0x33, 0x65, 0x95, + 0xe5, 0x4b, 0x29, 0x5e, 0x55, 0x68, 0xe5, 0x2c, 0x18, 0x2b, 0xf8, 0xea, 0xbf, 0xd2, 0x58, 0xf1, + 0x1a, 0x64, 0x48, 0x9b, 0x6f, 0x3f, 0x79, 0xae, 0x2b, 0x4f, 0x9f, 0xeb, 0xca, 0xcb, 0xe7, 0xba, + 0xfa, 0xc5, 0xa9, 0xae, 0x7e, 0x7b, 0xaa, 0xab, 0x8f, 0x4f, 0x75, 0xf5, 0xc9, 0xa9, 0xae, 0xfe, + 0x7c, 0xaa, 0xab, 0xbf, 0x9c, 0xea, 0xca, 0xcb, 0x53, 0x5d, 0x7d, 0xf4, 0x42, 0x57, 0x9e, 0xbc, + 0xd0, 0x95, 0xa7, 0x2f, 0x74, 0xe5, 0xa3, 0x7c, 0xdf, 0x32, 0xa9, 0xe3, 0x1f, 0xe4, 0xf9, 0xaf, + 0xf6, 0xcd, 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0xad, 0x00, 0xad, 0xb1, 0x0f, 0x00, 0x00, } func (x MatchType) String() string { @@ -2174,6 +2256,56 @@ func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { } return true } +func (this *MetricsMetadataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricsMetadataRequest) + if !ok { + that2, ok := that.(MetricsMetadataRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *MetricsMetadataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricsMetadataResponse) + if !ok { + that2, ok := that.(MetricsMetadataResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return 
this == nil + } else if this == nil { + return false + } + if len(this.Metadata) != len(that1.Metadata) { + return false + } + for i := range this.Metadata { + if !this.Metadata[i].Equal(that1.Metadata[i]) { + return false + } + } + return true +} func (this *TimeSeriesChunk) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2749,6 +2881,27 @@ func (this *MetricsForLabelMatchersResponse) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *MetricsMetadataRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.MetricsMetadataRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MetricsMetadataResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.MetricsMetadataResponse{") + if this.Metadata != nil { + s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *TimeSeriesChunk) GoString() string { if this == nil { return "nil" @@ -2927,6 +3080,7 @@ type IngesterClient interface { UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) + MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) // TransferTSDB transfers all files of a tsdb to a joining ingester @@ -3036,6 +3190,15 @@ func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *Metric return out, nil } +func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) { + out := new(MetricsMetadataResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/TransferChunks", opts...) if err != nil { @@ -3114,6 +3277,7 @@ type IngesterServer interface { UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) + MetricsMetadata(context.Context, *MetricsMetadataRequest) (*MetricsMetadataResponse, error) // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
TransferChunks(Ingester_TransferChunksServer) error // TransferTSDB transfers all files of a tsdb to a joining ingester @@ -3148,6 +3312,9 @@ func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserS func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") } +func (*UnimplementedIngesterServer) MetricsMetadata(ctx context.Context, req *MetricsMetadataRequest) (*MetricsMetadataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MetricsMetadata not implemented") +} func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") } @@ -3306,6 +3473,24 @@ func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _Ingester_MetricsMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsMetadataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).MetricsMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/MetricsMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).MetricsMetadata(ctx, req.(*MetricsMetadataRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) } @@ -3390,6 +3575,10 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{ MethodName: "MetricsForLabelMatchers", Handler: _Ingester_MetricsForLabelMatchers_Handler, }, + { + MethodName: "MetricsMetadata", + Handler: _Ingester_MetricsMetadata_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -4048,6 +4237,66 @@ func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int return len(dAtA) - i, nil } +func (m *MetricsMetadataRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsMetadataRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MetricsMetadataResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsMetadataResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsMetadataResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metadata) > 0 { + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4823,6 +5072,30 @@ func (m *MetricsForLabelMatchersResponse) Size() (n int) { return n } +func (m *MetricsMetadataRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MetricsMetadataResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + func (m *TimeSeriesChunk) Size() (n int) { if m == nil { return 0 @@ -5278,6 +5551,30 @@ func (this *MetricsForLabelMatchersResponse) String() string { }, "") return s } +func (this *MetricsMetadataRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricsMetadataRequest{`, + `}`, + }, "") + return s +} +func (this *MetricsMetadataResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetadata := "[]*MetricMetadata{" + for _, f := range this.Metadata { + repeatedStringForMetadata += strings.Replace(f.String(), "MetricMetadata", "MetricMetadata", 1) + "," + } + repeatedStringForMetadata += "}" + s := strings.Join([]string{`&MetricsMetadataResponse{`, + `Metadata:` + repeatedStringForMetadata + `,`, + `}`, + }, "") + return s +} func (this *TimeSeriesChunk) String() string { if this == nil { return "nil" @@ -7022,6 +7319,146 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsMetadataRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsMetadataResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsMetadataResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsMetadataResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata, &MetricMetadata{}) + if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto index 6a341b599831..ef52580b7317 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto @@ -19,6 +19,7 @@ service Ingester { rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {}; rpc AllUserStats(UserStatsRequest) returns (UsersStatsResponse) {}; rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) returns (MetricsForLabelMatchersResponse) {}; + rpc MetricsMetadata(MetricsMetadataRequest) returns (MetricsMetadataResponse) {}; // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; @@ -106,6 +107,13 @@ message MetricsForLabelMatchersResponse { repeated Metric metric = 1; } +message MetricsMetadataRequest { +} + +message MetricsMetadataResponse { + repeated MetricMetadata metadata = 1; +} + message TimeSeriesChunk { string from_ingester_id = 1; string user_id = 2; diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go index 43eb996c3b41..1caf36087ad3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + tsdb_record "github.com/prometheus/prometheus/tsdb/record" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" "google.golang.org/grpc/codes" @@ -32,6 +33,13 @@ import ( const ( // Number of timeseries to return in each batch of a QueryStream. queryStreamBatchSize = 128 + + // Discarded Metadata metric labels. + perUserMetadataLimit = "per_user_metadata_limit" + perMetricMetadataLimit = "per_metric_metadata_limit" + + // Period at which to attempt purging metadata from memory. + metadataPurgePeriod = 5 * time.Minute ) var ( @@ -58,6 +66,9 @@ type Config struct { ConcurrentFlushes int `yaml:"concurrent_flushes"` SpreadFlushes bool `yaml:"spread_flushes"` + // Config for metadata purging. 
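+	// A purge pass runs every metadataPurgePeriod (hardcoded above to 5 minutes);
+	// each pass drops entries last pushed before now minus MetadataRetainPeriod,
+	// so an idle entry survives for roughly the retain period plus up to one
+	// purge period.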
+ MetadataRetainPeriod time.Duration `yaml:"metadata_retain_period"` + RateUpdatePeriod time.Duration `yaml:"rate_update_period"` // Use tsdb block storage @@ -78,15 +89,19 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.WALConfig.RegisterFlags(f) f.IntVar(&cfg.MaxTransferRetries, "ingester.max-transfer-retries", 10, "Number of times to try and transfer chunks before falling back to flushing. Negative value or zero disables hand-over.") + f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-period", 1*time.Minute, "Period with which to attempt to flush chunks.") f.DurationVar(&cfg.RetainPeriod, "ingester.retain-period", 5*time.Minute, "Period chunks will remain in memory after flushing.") - f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 1*time.Minute, "Timeout for individual flush operations.") f.DurationVar(&cfg.MaxChunkIdle, "ingester.max-chunk-idle", 5*time.Minute, "Maximum chunk idle time before flushing.") f.DurationVar(&cfg.MaxStaleChunkIdle, "ingester.max-stale-chunk-idle", 2*time.Minute, "Maximum chunk idle time for chunks terminating in stale markers before flushing. 0 disables it and a stale series is not flushed until the max-chunk-idle timeout is reached.") + f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 1*time.Minute, "Timeout for individual flush operations.") f.DurationVar(&cfg.MaxChunkAge, "ingester.max-chunk-age", 12*time.Hour, "Maximum chunk age before flushing.") f.DurationVar(&cfg.ChunkAgeJitter, "ingester.chunk-age-jitter", 0, "Range of time to subtract from -ingester.max-chunk-age to spread out flushes") - f.BoolVar(&cfg.SpreadFlushes, "ingester.spread-flushes", true, "If true, spread series flushes across the whole period of -ingester.max-chunk-age.") f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 50, "Number of concurrent goroutines flushing to dynamodb.") + f.BoolVar(&cfg.SpreadFlushes, "ingester.spread-flushes", true, "If true, spread series flushes across the whole period of -ingester.max-chunk-age.") + + f.DurationVar(&cfg.MetadataRetainPeriod, "ingester.metadata-retain-period", 10*time.Minute, "Period at which metadata we have not seen will remain in memory before being deleted.") + f.DurationVar(&cfg.RateUpdatePeriod, "ingester.rate-update-period", 15*time.Second, "Period with which to update the per-user ingestion rates.") } @@ -110,6 +125,10 @@ type Ingester struct { userStates *userStates stopped bool // protected by userStatesMtx + // For storing metadata ingested. + usersMetadataMtx sync.RWMutex + usersMetadata map[string]*userMetricsMetadata + // One queue per flush thread. Fingerprint is used to // pick a queue. 
flushQueues []*util.PriorityQueue @@ -153,7 +172,7 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c recordPool = sync.Pool{ New: func() interface{} { - return &Record{} + return &WALRecord{} }, } } @@ -168,10 +187,12 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c cfg: cfg, clientConfig: clientConfig, metrics: newIngesterMetrics(registerer, true), - limits: limits, - chunkStore: chunkStore, - flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), - registerer: registerer, + + limits: limits, + chunkStore: chunkStore, + flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), + usersMetadata: map[string]*userMetricsMetadata{}, + registerer: registerer, } var err error @@ -284,8 +305,14 @@ func (i *Ingester) loop(ctx context.Context) error { rateUpdateTicker := time.NewTicker(i.cfg.RateUpdatePeriod) defer rateUpdateTicker.Stop() + metadataPurgeTicker := time.NewTicker(metadataPurgePeriod) + defer metadataPurgeTicker.Stop() + for { select { + case <-metadataPurgeTicker.C: + i.purgeUserMetricsMetadata() + case <-flushTicker.C: i.sweepUsers(false) @@ -366,26 +393,25 @@ func (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client. return nil, fmt.Errorf("no user id") } + // Given metadata is a best-effort approach, and we don't halt on errors + // process it before samples. Otherwise, we risk returning an error before ingestion. + i.pushMetadata(ctx, userID, req.GetMetadata()) + var firstPartialErr *validationError - var record *Record + var record *WALRecord if i.cfg.WALConfig.WALEnabled { - record = recordPool.Get().(*Record) - record.UserId = userID + record = recordPool.Get().(*WALRecord) + record.UserID = userID // Assuming there is not much churn in most cases, there is no use // keeping the record.Labels slice hanging around. - record.Labels = nil + record.Series = nil if cap(record.Samples) < len(req.Timeseries) { - record.Samples = make([]Sample, 0, len(req.Timeseries)) + record.Samples = make([]tsdb_record.RefSample, 0, len(req.Timeseries)) } else { record.Samples = record.Samples[:0] } } - if len(req.Metadata) > 0 { - logger := util.WithContext(ctx, util.Logger) - level.Debug(logger).Log("msg", "metadata received in the ingester", "count", len(req.Metadata)) - } - for _, ts := range req.Timeseries { for _, s := range ts.Samples { // append() copies the memory in `ts.Labels` except on the error path @@ -425,7 +451,7 @@ func (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client. 
// NOTE: memory for `labels` is unsafe; anything retained beyond the // life of this function must be copied -func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source client.WriteRequest_SourceEnum, record *Record) error { +func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source client.WriteRequest_SourceEnum, record *WALRecord) error { labels.removeBlanks() var ( @@ -481,10 +507,10 @@ func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, } if record != nil { - record.Samples = append(record.Samples, Sample{ - Fingerprint: uint64(fp), - Timestamp: uint64(timestamp), - Value: float64(value), + record.Samples = append(record.Samples, tsdb_record.RefSample{ + Ref: uint64(fp), + T: int64(timestamp), + V: float64(value), }) } @@ -502,6 +528,92 @@ func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, return err } +func (i *Ingester) pushMetadata(ctx context.Context, userID string, metadata []*client.MetricMetadata) { + var firstMetadataErr error + for _, metadata := range metadata { + err := i.appendMetadata(userID, metadata) + if err == nil { + i.metrics.ingestedMetadata.Inc() + continue + } + + i.metrics.ingestedMetadataFail.Inc() + if firstMetadataErr == nil { + firstMetadataErr = err + } + } + + // If we have any error with regard to metadata we just log and no-op. + // We consider metadata a best effort approach, errors here should not stop processing. + if firstMetadataErr != nil { + logger := util.WithContext(ctx, util.Logger) + level.Warn(logger).Log("msg", "failed to ingest some metadata", "err", firstMetadataErr) + } +} + +func (i *Ingester) appendMetadata(userID string, m *client.MetricMetadata) error { + i.userStatesMtx.RLock() + if i.stopped { + i.userStatesMtx.RUnlock() + return fmt.Errorf("ingester stopping") + } + i.userStatesMtx.RUnlock() + + userMetadata := i.getOrCreateUserMetadata(userID) + + return userMetadata.add(m.GetMetricName(), m) +} + +func (i *Ingester) getOrCreateUserMetadata(userID string) *userMetricsMetadata { + userMetadata := i.getUserMetadata(userID) + if userMetadata != nil { + return userMetadata + } + + i.usersMetadataMtx.Lock() + defer i.usersMetadataMtx.Unlock() + + // Ensure it was not created between switching locks. + userMetadata, ok := i.usersMetadata[userID] + if !ok { + userMetadata = newMetadataMap(i.limiter, i.metrics, userID) + i.usersMetadata[userID] = userMetadata + } + return userMetadata +} + +func (i *Ingester) getUserMetadata(userID string) *userMetricsMetadata { + i.usersMetadataMtx.RLock() + defer i.usersMetadataMtx.RUnlock() + return i.usersMetadata[userID] +} + +func (i *Ingester) getUsersWithMetadata() []string { + i.usersMetadataMtx.RLock() + defer i.usersMetadataMtx.RUnlock() + + userIDs := make([]string, 0, len(i.usersMetadata)) + for userID := range i.usersMetadata { + userIDs = append(userIDs, userID) + } + + return userIDs +} + +func (i *Ingester) purgeUserMetricsMetadata() { + deadline := time.Now().Add(-i.cfg.MetadataRetainPeriod) + + for _, userID := range i.getUsersWithMetadata() { + metadata := i.getUserMetadata(userID) + if metadata == nil { + continue + } + + // Remove all metadata that we no longer need to retain. 
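+		// purge drops every entry whose last push is older than the deadline and
+		// decrements the in-memory metadata metrics by the number of entries deleted.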
+ metadata.purge(deadline) + } +} + // Query implements service.IngesterServer func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client.QueryResponse, error) { if err := i.checkRunningOrStopping(); err != nil { @@ -746,6 +858,29 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr return result, nil } +// MetricsMetadata returns all the metric metadata of a user. +func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) (*client.MetricsMetadataResponse, error) { + i.userStatesMtx.RLock() + if err := i.checkRunningOrStopping(); err != nil { + i.userStatesMtx.RUnlock() + return nil, err + } + i.userStatesMtx.RUnlock() + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, fmt.Errorf("no user id") + } + + userMetadata := i.getUserMetadata(userID) + + if userMetadata == nil { + return &client.MetricsMetadataResponse{}, nil + } + + return &client.MetricsMetadataResponse{Metadata: userMetadata.toClientMetadata()}, nil +} + // UserStats returns ingestion statistics for the current user. func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { if err := i.checkRunningOrStopping(); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index b5bec043da84..e3f65ab568e2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -92,12 +92,13 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, } i := &Ingester{ - cfg: cfg, - clientConfig: clientConfig, - metrics: newIngesterMetrics(registerer, false), - limits: limits, - chunkStore: nil, - wal: &noopWAL{}, + cfg: cfg, + clientConfig: clientConfig, + metrics: newIngesterMetrics(registerer, false), + limits: limits, + chunkStore: nil, + usersMetadata: map[string]*userMetricsMetadata{}, + wal: &noopWAL{}, TSDBState: TSDBState{ dbs: make(map[string]*userTSDB), bucket: bucketClient, @@ -201,8 +202,14 @@ func (i *Ingester) updateLoop(ctx context.Context) error { refCachePurgeTicker := time.NewTicker(5 * time.Minute) defer refCachePurgeTicker.Stop() + // Similarly to the above, this is a hardcoded value. + metadataPurgeTicker := time.NewTicker(metadataPurgePeriod) + defer metadataPurgeTicker.Stop() + for { select { + case <-metadataPurgeTicker.C: + i.purgeUserMetricsMetadata() case <-rateUpdateTicker.C: i.userStatesMtx.RLock() for _, db := range i.TSDBState.dbs { @@ -261,6 +268,10 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien i.userStatesMtx.RUnlock() defer i.TSDBState.inflightWriteReqs.Done() + // Given metadata is a best-effort approach, and we don't halt on errors + // process it before samples. Otherwise, we risk returning an error before ingestion. 
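+	// Note that pushMetadata never fails the request: per-entry errors are counted
+	// in metrics and only the first one is logged (see pushMetadata in ingester.go).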
+ i.pushMetadata(ctx, userID, req.GetMetadata()) + // Keep track of some stats which are tracked only if the samples will be // successfully committed succeededSamplesCount := 0 @@ -363,6 +374,7 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien if firstPartialErr != nil { return &client.WriteResponse{}, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(firstPartialErr, userID).Error()) } + return &client.WriteResponse{}, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go index f2184e2cc2ad..3aa111b947e4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go @@ -8,8 +8,10 @@ import ( ) const ( - errMaxSeriesPerMetricLimitExceeded = "per-metric series limit (local: %d global: %d actual local: %d) exceeded" - errMaxSeriesPerUserLimitExceeded = "per-user series limit (local: %d global: %d actual local: %d) exceeded" + errMaxSeriesPerMetricLimitExceeded = "per-metric series limit (local: %d global: %d actual local: %d) exceeded" + errMaxSeriesPerUserLimitExceeded = "per-user series limit (local: %d global: %d actual local: %d) exceeded" + errMaxMetadataPerMetricLimitExceeded = "per-metric metadata limit (local: %d global: %d actual local: %d) exceeded" + errMaxMetadataPerUserLimitExceeded = "per-user metric metadata limit (local: %d global %d actual local: %d) exceeded" ) // RingCount is the interface exposed by a ring implementation which allows @@ -51,6 +53,21 @@ func (l *Limiter) AssertMaxSeriesPerMetric(userID string, series int) error { return fmt.Errorf(errMaxSeriesPerMetricLimitExceeded, localLimit, globalLimit, actualLimit) } +// AssertMaxMetadataPerMetric limit has not been reached compared to the current +// number of metadata per metric in input and returns an error if so. +func (l *Limiter) AssertMaxMetadataPerMetric(userID string, metadata int) error { + actualLimit := l.maxMetadataPerMetric(userID) + + if metadata < actualLimit { + return nil + } + + localLimit := l.limits.MaxLocalMetadataPerMetric(userID) + globalLimit := l.limits.MaxGlobalMetadataPerMetric(userID) + + return fmt.Errorf(errMaxMetadataPerMetricLimitExceeded, localLimit, globalLimit, actualLimit) +} + // AssertMaxSeriesPerUser limit has not been reached compared to the current // number of series in input and returns an error if so. func (l *Limiter) AssertMaxSeriesPerUser(userID string, series int) error { @@ -65,6 +82,21 @@ func (l *Limiter) AssertMaxSeriesPerUser(userID string, series int) error { return fmt.Errorf(errMaxSeriesPerUserLimitExceeded, localLimit, globalLimit, actualLimit) } +// AssertMaxMetricsWithMetadataPerUser limit has not been reached compared to the current +// number of metrics with metadata in input and returns an error if so. +func (l *Limiter) AssertMaxMetricsWithMetadataPerUser(userID string, metrics int) error { + actualLimit := l.maxMetadataPerUser(userID) + + if metrics < actualLimit { + return nil + } + + localLimit := l.limits.MaxLocalMetricsWithMetadataPerUser(userID) + globalLimit := l.limits.MaxGlobalMetricsWithMetadataPerUser(userID) + + return fmt.Errorf(errMaxMetadataPerUserLimitExceeded, localLimit, globalLimit, actualLimit) +} + // MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit. 
func (l *Limiter) MaxSeriesPerQuery(userID string) int { return l.limits.MaxSeriesPerQuery(userID) @@ -78,12 +110,12 @@ func (l *Limiter) maxSeriesPerMetric(userID string) int { if l.shardByAllLabels { // We can assume that series are evenly distributed across ingesters // so we do convert the global limit into a local limit - localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit)) + localLimit = minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit)) } else { // Given a metric is always pushed to the same set of ingesters (based on // the replication factor), we can configure the per-ingester local limit // equal to the global limit. - localLimit = l.minNonZero(localLimit, globalLimit) + localLimit = minNonZero(localLimit, globalLimit) } } @@ -96,17 +128,52 @@ func (l *Limiter) maxSeriesPerMetric(userID string) int { return localLimit } +func (l *Limiter) maxMetadataPerMetric(userID string) int { + localLimit := l.limits.MaxLocalMetadataPerMetric(userID) + globalLimit := l.limits.MaxGlobalMetadataPerMetric(userID) + + if globalLimit > 0 { + if l.shardByAllLabels { + localLimit = minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit)) + } else { + localLimit = minNonZero(localLimit, globalLimit) + } + } + + if localLimit == 0 { + localLimit = math.MaxInt32 + } + + return localLimit +} + func (l *Limiter) maxSeriesPerUser(userID string) int { - localLimit := l.limits.MaxLocalSeriesPerUser(userID) + return l.maxByLocalAndGlobal( + userID, + l.limits.MaxLocalSeriesPerUser, + l.limits.MaxGlobalSeriesPerUser, + ) +} + +func (l *Limiter) maxMetadataPerUser(userID string) int { + return l.maxByLocalAndGlobal( + userID, + l.limits.MaxLocalMetricsWithMetadataPerUser, + l.limits.MaxGlobalMetricsWithMetadataPerUser, + ) +} + +func (l *Limiter) maxByLocalAndGlobal(userID string, localLimitFn, globalLimitFn func(string) int) int { + localLimit := localLimitFn(userID) // The global limit is supported only when shard-by-all-labels is enabled, - // otherwise we wouldn't get an even split of series across ingesters and + // otherwise we wouldn't get an even split of series/metadata across ingesters and // can't take a "local decision" without any centralized coordination. 
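+	// (A sketch of the conversion, under the assumption that convertGlobalToLocalLimit
+	// uses the ring as elsewhere in this type: local is roughly
+	// global * replicationFactor / healthy-ingester-count, so each ingester
+	// enforces its proportional share of the global budget.)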
if l.shardByAllLabels { - // We can assume that series are evenly distributed across ingesters + // We can assume that series/metadata are evenly distributed across ingesters // so we do convert the global limit into a local limit - globalLimit := l.limits.MaxGlobalSeriesPerUser(userID) - localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit)) + globalLimit := globalLimitFn(userID) + localLimit = minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit)) } // If both the local and global limits are disabled, we just @@ -138,7 +205,7 @@ func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int { return 0 } -func (l *Limiter) minNonZero(first, second int) int { +func minNonZero(first, second int) int { if first == 0 || (second != 0 && first > second) { return second } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go index 636fd9ac209b..11a4546f06a3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go @@ -18,20 +18,25 @@ const ( ) type ingesterMetrics struct { - flushQueueLength prometheus.Gauge - ingestedSamples prometheus.Counter - ingestedSamplesFail prometheus.Counter - queries prometheus.Counter - queriedSamples prometheus.Histogram - queriedSeries prometheus.Histogram - queriedChunks prometheus.Histogram - memSeries prometheus.Gauge - memUsers prometheus.Gauge - memSeriesCreatedTotal *prometheus.CounterVec - memSeriesRemovedTotal *prometheus.CounterVec - createdChunks prometheus.Counter - walReplayDuration prometheus.Gauge - walCorruptionsTotal prometheus.Counter + flushQueueLength prometheus.Gauge + ingestedSamples prometheus.Counter + ingestedMetadata prometheus.Counter + ingestedSamplesFail prometheus.Counter + ingestedMetadataFail prometheus.Counter + queries prometheus.Counter + queriedSamples prometheus.Histogram + queriedSeries prometheus.Histogram + queriedChunks prometheus.Histogram + memSeries prometheus.Gauge + memMetadata prometheus.Gauge + memUsers prometheus.Gauge + memSeriesCreatedTotal *prometheus.CounterVec + memMetadataCreatedTotal *prometheus.CounterVec + memSeriesRemovedTotal *prometheus.CounterVec + memMetadataRemovedTotal *prometheus.CounterVec + createdChunks prometheus.Counter + walReplayDuration prometheus.Gauge + walCorruptionsTotal prometheus.Counter // Chunks / blocks transfer. 
sentChunks prometheus.Counter @@ -62,10 +67,18 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD Name: "cortex_ingester_ingested_samples_total", Help: "The total number of samples ingested.", }), + ingestedMetadata: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_ingested_metadata_total", + Help: "The total number of metadata ingested.", + }), ingestedSamplesFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "cortex_ingester_ingested_samples_failures_total", Help: "The total number of samples that errored on ingestion.", }), + ingestedMetadataFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_ingested_metadata_failures_total", + Help: "The total number of metadata that errored on ingestion.", + }), queries: promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "cortex_ingester_queries_total", Help: "The total number of queries the ingester has handled.", @@ -92,6 +105,10 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD Name: "cortex_ingester_memory_series", Help: "The current number of series in memory.", }), + memMetadata: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ingester_memory_metadata", + Help: "The current number of metadata in memory.", + }), memUsers: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Name: "cortex_ingester_memory_users", Help: "The current number of users in memory.", @@ -108,6 +125,14 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD Name: "cortex_ingester_wal_corruptions_total", Help: "Total number of WAL corruptions encountered.", }), + memMetadataCreatedTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_memory_metadata_created_total", + Help: "The total number of metadata that were created per user", + }, []string{"user"}), + memMetadataRemovedTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_memory_metadata_removed_total", + Help: "The total number of metadata that were removed per user.", + }, []string{"user"}), // Chunks / blocks transfer. sentChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go new file mode 100644 index 000000000000..53e9b2458826 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go @@ -0,0 +1,106 @@ +package ingester + +import ( + "sync" + "time" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util/validation" +) + +// userMetricsMetadata allows metric metadata of a tenant to be held by the ingester. +// Metadata is kept as a set as it can come from multiple targets that Prometheus scrapes +// with the same metric name. +type userMetricsMetadata struct { + limiter *Limiter + metrics *ingesterMetrics + userID string + + mtx sync.RWMutex + metricToMetadata map[string]metricMetadataSet +} + +func newMetadataMap(l *Limiter, m *ingesterMetrics, userID string) *userMetricsMetadata { + return &userMetricsMetadata{ + metricToMetadata: map[string]metricMetadataSet{}, + limiter: l, + metrics: m, + userID: userID, + } +} + +func (mm *userMetricsMetadata) add(metric string, metadata *client.MetricMetadata) error { + mm.mtx.Lock() + defer mm.mtx.Unlock() + + // As we get the set, we also validate two things: + // 1. 
The user is allowed to create new metrics to add metadata to. + // 2. If the metadata set is already present, it hasn't reached the limit of metadata we can append. + set, ok := mm.metricToMetadata[metric] + if !ok { + // Verify that the user can create more metric metadata given we don't have a set for that metric name. + if err := mm.limiter.AssertMaxMetricsWithMetadataPerUser(mm.userID, len(mm.metricToMetadata)); err != nil { + validation.DiscardedMetadata.WithLabelValues(mm.userID, perUserMetadataLimit).Inc() + return makeLimitError(perUserMetadataLimit, err) + } + set = metricMetadataSet{} + mm.metricToMetadata[metric] = set + } + + if err := mm.limiter.AssertMaxMetadataPerMetric(mm.userID, len(set)); err != nil { + validation.DiscardedMetadata.WithLabelValues(mm.userID, perMetricMetadataLimit).Inc() + return makeLimitError(perMetricMetadataLimit, err) + } + + // if we have seen this metadata before, it is a no-op and we don't need to change our metrics. + _, ok = set[*metadata] + if !ok { + mm.metrics.memMetadata.Inc() + mm.metrics.memMetadataCreatedTotal.WithLabelValues(mm.userID).Inc() + } + + mm.metricToMetadata[metric][*metadata] = time.Now() + return nil +} + +func (mm *userMetricsMetadata) purge(deadline time.Time) { + mm.mtx.Lock() + defer mm.mtx.Unlock() + var deleted int + for m, s := range mm.metricToMetadata { + deleted += s.purge(deadline) + + if len(s) <= 0 { + delete(mm.metricToMetadata, m) + } + } + + mm.metrics.memMetadata.Sub(float64(deleted)) + mm.metrics.memMetadataRemovedTotal.WithLabelValues(mm.userID).Add(float64(deleted)) +} + +func (mm *userMetricsMetadata) toClientMetadata() []*client.MetricMetadata { + mm.mtx.RLock() + defer mm.mtx.RUnlock() + r := make([]*client.MetricMetadata, 0, len(mm.metricToMetadata)) + for _, set := range mm.metricToMetadata { + for m := range set { + r = append(r, &m) + } + } + return r +} + +type metricMetadataSet map[client.MetricMetadata]time.Time + +func (mms metricMetadataSet) purge(deadline time.Time) int { + var deleted int + for metadata, t := range mms { + if deadline.After(t) { + delete(mms, metadata) + deleted++ + } + } + + return deleted +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go index 7e1029fe62b2..f3d448f01d0b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + tsdb_record "github.com/prometheus/prometheus/tsdb/record" "github.com/segmentio/fasthash/fnv1a" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" @@ -43,6 +44,7 @@ type userState struct { seriesInMetric []metricCounterShard + // Series metrics. 
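+	// (Note: the new metadata gauges/counters live on ingesterMetrics, not here;
+	// metadata is tracked per tenant in userMetricsMetadata rather than per series.)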
memSeries prometheus.Gauge memSeriesCreatedTotal prometheus.Counter memSeriesRemovedTotal prometheus.Counter @@ -169,7 +171,7 @@ func (us *userStates) getViaContext(ctx context.Context) (*userState, bool, erro // NOTE: memory for `labels` is unsafe; anything retained beyond the // life of this function must be copied -func (us *userStates) getOrCreateSeries(ctx context.Context, userID string, labels []client.LabelAdapter, record *Record) (*userState, model.Fingerprint, *memorySeries, error) { +func (us *userStates) getOrCreateSeries(ctx context.Context, userID string, labels []client.LabelAdapter, record *WALRecord) (*userState, model.Fingerprint, *memorySeries, error) { state := us.getOrCreate(userID) // WARNING: `err` may have a reference to unsafe memory in `labels` fp, series, err := state.getSeries(labels, record) @@ -178,7 +180,7 @@ func (us *userStates) getOrCreateSeries(ctx context.Context, userID string, labe // NOTE: memory for `metric` is unsafe; anything retained beyond the // life of this function must be copied -func (u *userState) getSeries(metric labelPairs, record *Record) (model.Fingerprint, *memorySeries, error) { +func (u *userState) getSeries(metric labelPairs, record *WALRecord) (model.Fingerprint, *memorySeries, error) { rawFP := client.FastFingerprint(metric) u.fpLocker.Lock(rawFP) fp := u.mapper.mapFP(rawFP, metric) @@ -201,7 +203,7 @@ func (u *userState) getSeries(metric labelPairs, record *Record) (model.Fingerpr return fp, series, nil } -func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric labelPairs, record *Record, recovery bool) (*memorySeries, error) { +func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric labelPairs, record *WALRecord, recovery bool) (*memorySeries, error) { // There's theoretically a relatively harmless race here if multiple // goroutines get the length of the series map at the same time, then // all proceed to add a new series. This is likely not worth addressing, @@ -232,9 +234,13 @@ func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric lab u.memSeries.Inc() if record != nil { - record.Labels = append(record.Labels, Labels{ - Fingerprint: uint64(fp), - Labels: metric, + lbls := make(labels.Labels, 0, len(metric)) + for _, m := range metric { + lbls = append(lbls, labels.Label(m)) + } + record.Series = append(record.Series, tsdb_record.RefSeries{ + Ref: uint64(fp), + Labels: lbls, }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go index 89a722a90898..50fdd37cc29c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go @@ -20,8 +20,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" + tsdb_record "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wal" "github.com/cortexproject/cortex/pkg/ingester/client" @@ -52,15 +54,34 @@ func (cfg *WALConfig) RegisterFlags(f *flag.FlagSet) { // WAL interface allows us to have a no-op WAL when the WAL is disabled. type WAL interface { // Log marshalls the records and writes it into the WAL. - Log(*Record) error + Log(*WALRecord) error // Stop stops all the WAL operations. 
	Stop()
 }
 
+// RecordType represents the type of the WAL/Checkpoint record.
+type RecordType byte
+
+const (
+	// Currently we also support the old records without a type header.
+	// For that, we assume the record type does not exceed 7, as the proto unmarshalling
+	// will produce an error if the first byte is less than 7 (thus we know it's not the old record).
+	// The old record will be removed in future releases, hence the record type should not
+	// go beyond 7 until then.
+
+	// WALRecordSeries is the type for the WAL record based on the Prometheus TSDB record for series.
+	WALRecordSeries RecordType = 1
+	// WALRecordSamples is the type for the WAL record based on the Prometheus TSDB record for samples.
+	WALRecordSamples RecordType = 2
+
+	// CheckpointRecord is the type for the Checkpoint record based on protos.
+	CheckpointRecord RecordType = 3
+)
+
 type noopWAL struct{}
 
-func (noopWAL) Log(*Record) error { return nil }
-func (noopWAL) Stop()             {}
+func (noopWAL) Log(*WALRecord) error { return nil }
+func (noopWAL) Stop()                {}
 
 type walWrapper struct {
 	cfg  WALConfig
@@ -70,8 +91,9 @@ type walWrapper struct {
 	wal           *wal.WAL
 	getUserStates func() map[string]*userState
 	checkpointMtx sync.Mutex
+	bytesPool     sync.Pool
 
-	// Checkpoint metrics.
+	// Metrics.
 	checkpointDeleteFail       prometheus.Counter
 	checkpointDeleteTotal      prometheus.Counter
 	checkpointCreationFail     prometheus.Counter
@@ -79,6 +101,7 @@ type walWrapper struct {
 	checkpointDuration         prometheus.Summary
 	checkpointLoggedBytesTotal prometheus.Counter
 	walLoggedBytesTotal        prometheus.Counter
+	walRecordsLogged           prometheus.Counter
 }
 
 // newWAL creates a WAL object. If the WAL is disabled, then the returned WAL is a no-op WAL.
@@ -93,7 +116,7 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register
 	if registerer != nil {
 		walRegistry = prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, registerer)
 	}
-	tsdbWAL, err := wal.NewSize(util.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, true)
+	tsdbWAL, err := wal.NewSize(util.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false)
 	if err != nil {
 		return nil, err
 	}
@@ -103,6 +126,11 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register
 		quit:          make(chan struct{}),
 		wal:           tsdbWAL,
 		getUserStates: userStatesFunc,
+		bytesPool: sync.Pool{
+			New: func() interface{} {
+				return make([]byte, 0, 512)
+			},
+		},
 	}
 
 	w.checkpointDeleteFail = promauto.With(registerer).NewCounter(prometheus.CounterOpts{
@@ -126,6 +154,10 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register
 		Help:       "Time taken to create a checkpoint.",
 		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
 	})
+	w.walRecordsLogged = promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+		Name: "cortex_ingester_wal_records_logged_total",
+		Help: "Total number of WAL records logged.",
+	})
 	w.checkpointLoggedBytesTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{
 		Name: "cortex_ingester_checkpoint_logged_bytes_total",
 		Help: "Total number of bytes written to disk for checkpointing.",
@@ -146,20 +178,37 @@ func (w *walWrapper) Stop() {
 	w.wal.Close()
 }
 
-func (w *walWrapper) Log(record *Record) error {
+func (w *walWrapper) Log(record *WALRecord) error {
+	if record == nil {
+		return nil
+	}
 	select {
 	case <-w.quit:
 		return nil
 	default:
-		if record == nil {
-			return nil
+		buf := w.bytesPool.Get().([]byte)[:0]
+		defer func() {
+			w.bytesPool.Put(buf) // nolint:staticcheck
+		}()
+
+		if len(record.Series) > 0 {
+			buf =
record.encodeSeries(buf) + if err := w.wal.Log(buf); err != nil { + return err + } + w.walRecordsLogged.Inc() + w.walLoggedBytesTotal.Add(float64(len(buf))) + buf = buf[:0] } - buf, err := proto.Marshal(record) - if err != nil { - return err + if len(record.Samples) > 0 { + buf = record.encodeSamples(buf) + if err := w.wal.Log(buf); err != nil { + return err + } + w.walRecordsLogged.Inc() + w.walLoggedBytesTotal.Add(float64(len(buf))) } - w.walLoggedBytesTotal.Add(float64(len(buf))) - return w.wal.Log(buf) + return nil } } @@ -256,7 +305,7 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) { if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil { return errors.Wrap(err, "create checkpoint dir") } - checkpoint, err := wal.New(nil, nil, checkpointDirTemp, true) + checkpoint, err := wal.New(nil, nil, checkpointDirTemp, false) if err != nil { return errors.Wrap(err, "open checkpoint") } @@ -283,10 +332,11 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) { } var wireChunkBuf []client.Chunk + b := make([]byte, 0, 1024) for userID, state := range us { for pair := range state.fpToSeries.iter() { state.fpLocker.Lock(pair.fp) - wireChunkBuf, err = w.checkpointSeries(checkpoint, userID, pair.fp, pair.series, wireChunkBuf) + wireChunkBuf, b, err = w.checkpointSeries(checkpoint, userID, pair.fp, pair.series, wireChunkBuf, b) state.fpLocker.Unlock(pair.fp) if err != nil { return err @@ -395,28 +445,28 @@ func (w *walWrapper) deleteCheckpoints(maxIndex int) (err error) { } // checkpointSeries write the chunks of the series to the checkpoint. -func (w *walWrapper) checkpointSeries(cp *wal.WAL, userID string, fp model.Fingerprint, series *memorySeries, wireChunks []client.Chunk) ([]client.Chunk, error) { +func (w *walWrapper) checkpointSeries(cp *wal.WAL, userID string, fp model.Fingerprint, series *memorySeries, wireChunks []client.Chunk, b []byte) ([]client.Chunk, []byte, error) { var err error wireChunks, err = toWireChunks(series.chunkDescs, wireChunks[:0]) if err != nil { - return wireChunks, err + return wireChunks, b, err } - buf, err := proto.Marshal(&Series{ + b, err = encodeWithTypeHeader(&Series{ UserId: userID, Fingerprint: uint64(fp), Labels: client.FromLabelsToLabelAdapters(series.metric), Chunks: wireChunks, - }) + }, CheckpointRecord, b) if err != nil { - return wireChunks, err + return wireChunks, b, err } - err = cp.Log(buf) + err = cp.Log(b) if err == nil { - w.checkpointLoggedBytesTotal.Add(float64(len(buf))) + w.checkpointLoggedBytesTotal.Add(float64(len(b))) } - return wireChunks, err + return wireChunks, b, err } type walRecoveryParameters struct { @@ -583,12 +633,15 @@ func processCheckpoint(name string, userStates *userStates, params walRecoveryPa Loop: for reader.Next() { s := seriesPool.Get().(*Series) - if err := proto.Unmarshal(reader.Record(), s); err != nil { + m, err := decodeCheckpointRecord(reader.Record(), s) + if err != nil { // We don't return here in order to close/drain all the channels and // make sure all goroutines exit. capturedErr = err break Loop } + s = m.(*Series) + // The yoloString from the unmarshal of LabelAdapter gets corrupted // when travelling through the channel. Hence making a copy of that. 
// This extra alloc during the read path is fine as it's only 1 time @@ -688,7 +741,7 @@ func processCheckpointRecord( } type samplesWithUserID struct { - samples []Sample + samples []tsdb_record.RefSample userID string } @@ -755,6 +808,8 @@ func processWAL(startSegment int, userStates *userStates, params walRecoveryPara var ( capturedErr error record = &Record{} + walRecord = &WALRecord{} + lp labelPairs ) Loop: for reader.Next() { @@ -765,23 +820,52 @@ Loop: break Loop default: } - if err := proto.Unmarshal(reader.Record(), record); err != nil { + + record.Samples = record.Samples[:0] + record.Labels = record.Labels[:0] + // Only one of 'record' or 'walRecord' will have the data. + if err := decodeWALRecord(reader.Record(), record, walRecord); err != nil { // We don't return here in order to close/drain all the channels and // make sure all goroutines exit. capturedErr = err break Loop } - if len(record.Labels) > 0 { - state := userStates.getOrCreate(record.UserId) - // Create the series from labels which do not exist. - for _, labels := range record.Labels { - _, ok := state.fpToSeries.get(model.Fingerprint(labels.Fingerprint)) + if len(record.Labels) > 0 || len(walRecord.Series) > 0 { + + var userID string + if len(walRecord.Series) > 0 { + userID = walRecord.UserID + } else { + userID = record.UserId + } + + state := userStates.getOrCreate(userID) + + createSeries := func(fingerprint model.Fingerprint, lbls labelPairs) error { + _, ok := state.fpToSeries.get(fingerprint) if ok { - continue + return nil + } + _, err := state.createSeriesWithFingerprint(fingerprint, lbls, nil, true) + return err + } + + for _, labels := range record.Labels { + if err := createSeries(model.Fingerprint(labels.Fingerprint), labels.Labels); err != nil { + // We don't return here in order to close/drain all the channels and + // make sure all goroutines exit. + capturedErr = err + break Loop + } + } + + for _, s := range walRecord.Series { + lp = lp[:0] + for _, l := range s.Labels { + lp = append(lp, client.LabelAdapter(l)) } - _, err := state.createSeriesWithFingerprint(model.Fingerprint(labels.Fingerprint), labels.Labels, nil, true) - if err != nil { + if err := createSeries(model.Fingerprint(s.Ref), lp); err != nil { // We don't return here in order to close/drain all the channels and // make sure all goroutines exit. capturedErr = err @@ -794,39 +878,70 @@ Loop: // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise // cause thousands of very large in flight buffers occupying large amounts // of unused memory. - for len(record.Samples) > 0 { + for len(record.Samples) > 0 || len(walRecord.Samples) > 0 { m := 5000 - if len(record.Samples) < m { - m = len(record.Samples) + var userID string + if len(record.Samples) > 0 { + userID = record.UserId + if len(record.Samples) < m { + m = len(record.Samples) + } } + if len(walRecord.Samples) > 0 { + userID = walRecord.UserID + if len(walRecord.Samples) < m { + m = len(walRecord.Samples) + } + } + for i := 0; i < params.numWorkers; i++ { if len(shards[i].samples) == 0 { // It is possible that the previous iteration did not put // anything in this shard. In that case no need to get a new buffer. 
- shards[i].userID = record.UserId + shards[i].userID = userID continue } select { case buf := <-outputs[i]: buf.samples = buf.samples[:0] - buf.userID = record.UserId + buf.userID = userID shards[i] = buf default: shards[i] = &samplesWithUserID{ - userID: record.UserId, + userID: userID, } } } - for _, sam := range record.Samples[:m] { - mod := sam.Fingerprint % uint64(params.numWorkers) - shards[mod].samples = append(shards[mod].samples, sam) + + if len(record.Samples) > 0 { + for _, sam := range record.Samples[:m] { + mod := sam.Fingerprint % uint64(params.numWorkers) + shards[mod].samples = append(shards[mod].samples, tsdb_record.RefSample{ + Ref: sam.Fingerprint, + T: int64(sam.Timestamp), + V: sam.Value, + }) + } + } + if len(walRecord.Samples) > 0 { + for _, sam := range walRecord.Samples[:m] { + mod := sam.Ref % uint64(params.numWorkers) + shards[mod].samples = append(shards[mod].samples, sam) + } } + for i := 0; i < params.numWorkers; i++ { if len(shards[i].samples) > 0 { inputs[i] <- shards[i] } } - record.Samples = record.Samples[m:] + + if len(record.Samples) > 0 { + record.Samples = record.Samples[m:] + } + if len(walRecord.Samples) > 0 { + walRecord.Samples = walRecord.Samples[m:] + } } } @@ -868,21 +983,20 @@ func processWALSamples(userStates *userStates, stateCache map[string]*userState, } sc := seriesCache[samples.userID] for i := range samples.samples { - series, ok := sc[samples.samples[i].Fingerprint] + series, ok := sc[samples.samples[i].Ref] if !ok { - series, ok = state.fpToSeries.get(model.Fingerprint(samples.samples[i].Fingerprint)) + series, ok = state.fpToSeries.get(model.Fingerprint(samples.samples[i].Ref)) if !ok { // This should ideally not happen. // If the series was not created in recovering checkpoint or // from the labels of any records previous to this, there // is no way to get the labels for this fingerprint. - level.Warn(util.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Fingerprint).String()) + level.Warn(util.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String()) continue } } - - sp.Timestamp = model.Time(samples.samples[i].Timestamp) - sp.Value = model.SampleValue(samples.samples[i].Value) + sp.Timestamp = model.Time(samples.samples[i].T) + sp.Value = model.SampleValue(samples.samples[i].V) // There can be many out of order samples because of checkpoint and WAL overlap. // Checking this beforehand avoids the allocation of lots of error messages. if sp.Timestamp.After(series.lastTime) { @@ -956,3 +1070,109 @@ func SegmentRange(dir string) (int, int, error) { } return first, last, nil } + +func decodeCheckpointRecord(rec []byte, m proto.Message) (_ proto.Message, err error) { + switch RecordType(rec[0]) { + case CheckpointRecord: + if err := proto.Unmarshal(rec[1:], m); err != nil { + return m, err + } + default: + // The legacy proto record will have it's first byte >7. + // Hence it does not match any of the existing record types. + err := proto.Unmarshal(rec, m) + if err != nil { + return m, err + } + } + + return m, err +} + +func encodeWithTypeHeader(m proto.Message, typ RecordType, b []byte) ([]byte, error) { + buf, err := proto.Marshal(m) + if err != nil { + return b, err + } + + b = append(b[:0], byte(typ)) + b = append(b, buf...) + return b, nil +} + +// WALRecord is a struct combining the series and samples record. 
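+// On disk every record starts with a single type byte (WALRecordSeries or
+// WALRecordSamples), followed by the tenant ID as a uvarint-length-prefixed
+// string and then the payload in the Prometheus TSDB record encoding.
+// A round trip through the helpers below looks like this (sketch, values
+// purely illustrative):
+//
+//	rec := &WALRecord{UserID: "tenant-1", Samples: []tsdb_record.RefSample{{Ref: 1, T: 1000, V: 4.2}}}
+//	buf := rec.encodeSamples(nil)                        // [type byte][uvarint len]["tenant-1"][samples payload]
+//	err := decodeWALRecord(buf, &Record{}, &WALRecord{}) // refills UserID and Samples on the WALRecord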
+type WALRecord struct {
+	UserID  string
+	Series  []tsdb_record.RefSeries
+	Samples []tsdb_record.RefSample
+}
+
+func (record *WALRecord) encodeSeries(b []byte) []byte {
+	buf := encoding.Encbuf{B: b}
+	buf.PutByte(byte(WALRecordSeries))
+	buf.PutUvarintStr(record.UserID)
+
+	var enc tsdb_record.Encoder
+	// The 'encoded' slice already has the type header and userID here, hence we re-use
+	// the remaining part of the slice (i.e. encoded[len(encoded):]) to encode the series.
+	encoded := buf.Get()
+	encoded = append(encoded, enc.Series(record.Series, encoded[len(encoded):])...)
+
+	return encoded
+}
+
+func (record *WALRecord) encodeSamples(b []byte) []byte {
+	buf := encoding.Encbuf{B: b}
+	buf.PutByte(byte(WALRecordSamples))
+	buf.PutUvarintStr(record.UserID)
+
+	var enc tsdb_record.Encoder
+	// The 'encoded' slice already has the type header and userID here, hence we re-use
+	// the remaining part of the slice (i.e. encoded[len(encoded):]) to encode the samples.
+	encoded := buf.Get()
+	encoded = append(encoded, enc.Samples(record.Samples, encoded[len(encoded):])...)
+
+	return encoded
+}
+
+func decodeWALRecord(b []byte, rec *Record, walRec *WALRecord) (err error) {
+	var (
+		userID   string
+		dec      tsdb_record.Decoder
+		rseries  []tsdb_record.RefSeries
+		rsamples []tsdb_record.RefSample
+
+		decbuf = encoding.Decbuf{B: b}
+		t      = RecordType(decbuf.Byte())
+	)
+
+	walRec.Series = walRec.Series[:0]
+	walRec.Samples = walRec.Samples[:0]
+	switch t {
+	case WALRecordSamples:
+		userID = decbuf.UvarintStr()
+		rsamples, err = dec.Samples(decbuf.B, walRec.Samples)
+	case WALRecordSeries:
+		userID = decbuf.UvarintStr()
+		rseries, err = dec.Series(decbuf.B, walRec.Series)
+	default:
+		// The legacy proto record will have its first byte >7.
+		// Hence it does not match any of the existing record types.
+		err = proto.Unmarshal(b, rec)
+		return err
+	}
+
+	// We reach here only if it's a record with a type header.
+	if decbuf.Err() != nil {
+		return decbuf.Err()
+	}
+
+	if err == nil {
+		// There was no error decoding the records with type headers.
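+		// (On the legacy path above we returned early, so rec carries the data and
+		// walRec stays empty, matching the "only one of record/walRecord has the
+		// data" contract in processWAL.)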
+ walRec.UserID = userID + walRec.Samples = rsamples + walRec.Series = rseries + } + + return err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go index 57a5d02179ca..986f2370def1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/user" @@ -24,6 +25,7 @@ type Distributor interface { LabelValuesForLabelName(context.Context, model.LabelName) ([]string, error) LabelNames(context.Context) ([]string, error) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) + MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) } func newDistributorQueryable(distributor Distributor, streaming bool, iteratorFn chunkIteratorFunc) storage.Queryable { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go index 1c5fce638f1d..a59443f8b04f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go @@ -11,6 +11,7 @@ import ( "net/http" "net/url" "path" + "strings" "sync" "time" @@ -23,6 +24,8 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -152,27 +155,26 @@ func (f *Frontend) Handler() http.Handler { } func (f *Frontend) handle(w http.ResponseWriter, r *http.Request) { - userID, err := user.ExtractOrgID(r.Context()) - if err != nil { - server.WriteError(w, err) - return - } startTime := time.Now() resp, err := f.roundTripper.RoundTrip(r) queryResponseTime := time.Since(startTime) if f.cfg.LogQueriesLongerThan > 0 && queryResponseTime > f.cfg.LogQueriesLongerThan { - logMessage := []interface{}{"msg", "slow query", - "org_id", userID, - "url", fmt.Sprintf("http://%s", r.Host+r.RequestURI), + logMessage := []interface{}{ + "msg", "slow query", + "host", r.Host, + "path", r.URL.Path, "time_taken", queryResponseTime.String(), } + for k, v := range r.URL.Query() { + logMessage = append(logMessage, fmt.Sprintf("qs_%s", k), strings.Join(v, ",")) + } pf := r.PostForm.Encode() if pf != "" { logMessage = append(logMessage, "body", pf) } - level.Info(f.log).Log(logMessage...) + level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) 
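+		// Logging via util.WithContext means this slow-query line also carries the
+		// org ID and trace ID when they are present in the request context (see the
+		// pkg/util/log.go change further below).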
 }
 
 if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
index 60b9fbde0e7a..e03de6f00d56 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
@@ -93,6 +93,14 @@ type Config struct {
 EnableAPI bool `yaml:"enable_api"`
 }
 
+// Validate validates the config and returns an error on failure.
+func (cfg *Config) Validate() error {
+ if err := cfg.StoreConfig.Validate(); err != nil {
+ return errors.Wrap(err, "invalid storage config")
+ }
+ return nil
+}
+
 // RegisterFlags adds the flags required to config this to the given FlagSet
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 cfg.StoreConfig.RegisterFlags(f)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
index 6bface18117b..e9da96659185 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
@@ -5,10 +5,13 @@ import (
 "flag"
 "fmt"
 
+ "github.com/pkg/errors"
+
 "github.com/cortexproject/cortex/pkg/chunk"
 "github.com/cortexproject/cortex/pkg/chunk/aws"
 "github.com/cortexproject/cortex/pkg/chunk/azure"
 "github.com/cortexproject/cortex/pkg/chunk/gcp"
+ "github.com/cortexproject/cortex/pkg/chunk/openstack"
 "github.com/cortexproject/cortex/pkg/configs/client"
 "github.com/cortexproject/cortex/pkg/ruler/rules"
 "github.com/cortexproject/cortex/pkg/ruler/rules/objectclient"
@@ -23,6 +26,7 @@ type RuleStoreConfig struct {
 Azure azure.BlobStorageConfig `yaml:"azure"`
 GCS gcp.GCSConfig `yaml:"gcs"`
 S3 aws.S3Config `yaml:"s3"`
+ Swift openstack.SwiftConfig `yaml:"swift"`
 mock rules.RuleStore `yaml:"-"`
 }
 
@@ -33,9 +37,18 @@ func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) {
 cfg.Azure.RegisterFlagsWithPrefix("ruler.storage.", f)
 cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f)
 cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f)
+ cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f)
 f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3)")
 }
 
+// Validate validates the config and returns an error on failure.
+func (cfg *RuleStoreConfig) Validate() error {
+ if err := cfg.Swift.Validate(); err != nil {
+ return errors.Wrap(err, "invalid Swift Storage config")
+ }
+ return nil
+}
+
 // NewRuleStorage returns a new rule storage backend poller and store
 func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) {
 if cfg.mock != nil {
@@ -57,6 +70,8 @@ func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) {
 return newObjRuleStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCS, ""))
 case "s3":
 return newObjRuleStore(aws.NewS3ObjectClient(cfg.S3, ""))
+ case "swift":
+ return newObjRuleStore(openstack.NewSwiftObjectClient(cfg.Swift, ""))
 default:
 return nil, fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs", cfg.Type)
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
index 31bf79f65527..ae681fd59b86 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
@@ -103,10 +103,9 @@ func WithContext(ctx context.Context, l log.Logger) log.Logger {
 // Weaveworks uses "orgs" and "orgID" to represent Cortex users,
 // even though the code-base generally uses `userID` to refer to the same thing.
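+ // Attaching the user ID is now best-effort: when the org ID is absent we
+ // keep going so that the trace ID below can still be added to the logger.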
userID, err := user.ExtractOrgID(ctx) - if err != nil { - return l + if err == nil { + l = WithUserID(userID, l) } - l = WithUserID(userID, l) traceID, ok := middleware.ExtractTraceID(ctx) if !ok { @@ -127,7 +126,7 @@ func WithUserID(userID string, l log.Logger) log.Logger { // its details. func WithTraceID(traceID string, l log.Logger) log.Logger { // See note in WithContext. - return log.With(l, "trace_id", traceID) + return log.With(l, "traceID", traceID) } // CheckFatal prints an error and exits with error code 1 if err is non-nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go index da4037890d6f..a3018b0f38a9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go @@ -37,12 +37,12 @@ func FromContext(ctx context.Context) *SpanLogger { sp := opentracing.SpanFromContext(ctx) if sp == nil { return &SpanLogger{ - Logger: util.Logger, + Logger: util.WithContext(ctx, util.Logger), Span: defaultNoopSpan, } } return &SpanLogger{ - Logger: util.Logger, + Logger: util.WithContext(ctx, util.Logger), Span: sp, } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 7af7876ed6ad..588e25768200 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -41,6 +41,7 @@ type Limits struct { SubringSize int `yaml:"user_subring_size"` // Ingester enforced limits. + // Series MaxSeriesPerQuery int `yaml:"max_series_per_query"` MaxSamplesPerQuery int `yaml:"max_samples_per_query"` MaxLocalSeriesPerUser int `yaml:"max_series_per_user"` @@ -48,6 +49,11 @@ type Limits struct { MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user"` MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric"` MinChunkLength int `yaml:"min_chunk_length"` + // Metadata + MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user"` + MaxLocalMetadataPerMetric int `yaml:"max_metadata_per_metric"` + MaxGlobalMetricsWithMetadataPerUser int `yaml:"max_global_metadata_per_user"` + MaxGlobalMetadataPerMetric int `yaml:"max_global_metadata_per_metric"` // Querier enforced limits. MaxChunksPerQuery int `yaml:"max_chunks_per_query"` @@ -88,6 +94,11 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster. 0 to disable.") f.IntVar(&l.MinChunkLength, "ingester.min-chunk-length", 0, "Minimum number of samples in an idle chunk to flush it to the store. Use with care, if chunks are less than this size they will be discarded.") + f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.") + f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.") + f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. 
Supported only if -distributor.shard-by-all-labels is true.")
+ f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.")
+
 f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.")
 f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit to length of chunk store queries, 0 to disable.")
 f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.")
@@ -297,6 +308,26 @@ func (o *Overrides) MinChunkLength(userID string) int {
 return o.getOverridesForUser(userID).MinChunkLength
 }
 
+// MaxLocalMetricsWithMetadataPerUser returns the maximum number of metrics with metadata a user is allowed to store in a single ingester.
+func (o *Overrides) MaxLocalMetricsWithMetadataPerUser(userID string) int {
+ return o.getOverridesForUser(userID).MaxLocalMetricsWithMetadataPerUser
+}
+
+// MaxLocalMetadataPerMetric returns the maximum number of metadata allowed per metric in a single ingester.
+func (o *Overrides) MaxLocalMetadataPerMetric(userID string) int {
+ return o.getOverridesForUser(userID).MaxLocalMetadataPerMetric
+}
+
+// MaxGlobalMetricsWithMetadataPerUser returns the maximum number of metrics with metadata a user is allowed to store across the cluster.
+func (o *Overrides) MaxGlobalMetricsWithMetadataPerUser(userID string) int {
+ return o.getOverridesForUser(userID).MaxGlobalMetricsWithMetadataPerUser
+}
+
+// MaxGlobalMetadataPerMetric returns the maximum number of metadata allowed per metric across the cluster.
+func (o *Overrides) MaxGlobalMetadataPerMetric(userID string) int {
+ return o.getOverridesForUser(userID).MaxGlobalMetadataPerMetric
+}
+
 // SubringSize returns the size of the subring for a given user.
 func (o *Overrides) SubringSize(userID string) int {
 return o.getOverridesForUser(userID).SubringSize
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
index b3802d9c7dc1..6434b34d1cd8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
@@ -66,6 +66,8 @@ var DiscardedSamples = prometheus.NewCounterVec(
 },
 []string{discardReasonLabel, "user"},
 )
+
+// DiscardedMetadata is a metric counting the number of metadata entries discarded, by reason.
 var DiscardedMetadata = prometheus.NewCounterVec(
 prometheus.CounterOpts{
 Name: "cortex_discarded_metadata_total",
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go
new file mode 100644
index 000000000000..0fa1c083a262
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go
@@ -0,0 +1,29 @@
+/*
+Package accounts contains functionality for working with Object Storage
+account resources. An account is the top-level resource in the object storage
+hierarchy: containers belong to accounts, objects belong to containers.
+
+Another way to think of an account is as a namespace for all your
+resources. It is synonymous with a project or tenant in other OpenStack
+services.
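+
+Example to Get an Account's Custom Metadata (an illustrative sketch; it
+assumes objectStorageClient is an already-configured service client)
+
+ metadata, err := accounts.Get(objectStorageClient, nil).ExtractMetadata()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("%+v\n", metadata)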
+ +Example to Get an Account + + account, err := accounts.Get(objectStorageClient, nil).Extract() + fmt.Printf("%+v\n", account) + +Example to Update an Account + + metadata := map[string]string{ + "some": "metadata", + } + + updateOpts := accounts.UpdateOpts{ + Metadata: metadata, + } + + updateResult, err := accounts.Update(objectStorageClient, updateOpts).Extract() + fmt.Printf("%+v\n", updateResult) + +*/ +package accounts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go new file mode 100644 index 000000000000..452a331c74ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go @@ -0,0 +1,100 @@ +package accounts + +import "github.com/gophercloud/gophercloud" + +// GetOptsBuilder allows extensions to add additional headers to the Get +// request. +type GetOptsBuilder interface { + ToAccountGetMap() (map[string]string, error) +} + +// GetOpts is a structure that contains parameters for getting an account's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToAccountGetMap formats a GetOpts into a map[string]string of headers. +func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves an account's metadata. To extract just the +// custom metadata, call the ExtractMetadata method on the GetResult. To extract +// all the headers that are returned (including the metadata), call the +// Extract method on the GetResult. +func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountGetMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Head(getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional headers to the Update +// request. +type UpdateOptsBuilder interface { + ToAccountUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that contains parameters for updating, creating, or +// deleting an account's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` +} + +// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. +func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { + headers, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + headers["X-Account-Meta-"+k] = v + } + return headers, err +} + +// Update is a function that creates, updates, or deletes an account's metadata. +// To extract the headers returned, call the Extract method on the UpdateResult. 
+func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountUpdateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go new file mode 100644 index 000000000000..10661e671571 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go @@ -0,0 +1,180 @@ +package accounts + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" +) + +// UpdateResult is returned from a call to the Update function. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// UpdateHeader represents the headers returned in the response from an Update +// request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + TransID string `json:"X-Trans-Id"` + Date time.Time `json:"-"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// Extract will return a struct of headers returned from a call to Get. To +// obtain a map of headers, call the Extract method on the GetResult. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// GetHeader represents the headers returned in the response from a Get request. 
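+// Note that Swift returns numeric headers such as X-Account-Bytes-Used as
+// strings; UnmarshalJSON below parses them into the typed fields.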
+type GetHeader struct { + BytesUsed int64 `json:"-"` + QuotaBytes *int64 `json:"-"` + ContainerCount int64 `json:"-"` + ContentLength int64 `json:"-"` + ObjectCount int64 `json:"-"` + ContentType string `json:"Content-Type"` + TransID string `json:"X-Trans-Id"` + TempURLKey string `json:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `json:"X-Account-Meta-Temp-URL-Key-2"` + Date time.Time `json:"-"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Account-Bytes-Used"` + QuotaBytes string `json:"X-Account-Meta-Quota-Bytes"` + ContentLength string `json:"Content-Length"` + ContainerCount string `json:"X-Account-Container-Count"` + ObjectCount string `json:"X-Account-Object-Count"` + Date string `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.QuotaBytes { + case "": + r.QuotaBytes = nil + default: + v, err := strconv.ParseInt(s.QuotaBytes, 10, 64) + if err != nil { + return err + } + r.QuotaBytes = &v + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + switch s.ContainerCount { + case "": + r.ContainerCount = 0 + default: + r.ContainerCount, err = strconv.ParseInt(s.ContainerCount, 10, 64) + if err != nil { + return err + } + } + + if s.Date != "" { + r.Date, err = time.Parse(time.RFC1123, s.Date) + } + + return err +} + +// GetResult is returned from a call to the Get function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metatdata associated with the account. 
+func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Account-Meta-") { + key := strings.TrimPrefix(k, "X-Account-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go new file mode 100644 index 000000000000..71540b1daf3d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go @@ -0,0 +1,11 @@ +package accounts + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func updateURL(c *gophercloud.ServiceClient) string { + return getURL(c) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go new file mode 100644 index 000000000000..ffc4f05297b6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go @@ -0,0 +1,95 @@ +/* +Package containers contains functionality for working with Object Storage +container resources. A container serves as a logical namespace for objects +that are placed inside it - an object with the same name in two different +containers represents two different objects. + +In addition to containing objects, you can also use the container to control +access to objects by using an access control list (ACL). + +Note: When referencing the Object Storage API docs, some of the API actions +are listed under "accounts" rather than "containers". This was an intentional +design in Gophercloud to make some container actions feel more natural. 
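+
+Example to Get a Container's Custom Metadata (an illustrative sketch; it
+assumes objectStorageClient is an already-configured service client)
+
+ containerName := "my_container"
+
+ metadata, err := containers.Get(objectStorageClient, containerName, nil).ExtractMetadata()
+ if err != nil {
+ panic(err)
+ }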
+ +Example to List Containers + + listOpts := containers.ListOpts{ + Full: true, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractInfo(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to List Only Container Names + + listOpts := containers.ListOpts{ + Full: false, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractNames(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to Create a Container + + createOpts := containers.CreateOpts{ + ContentType: "application/json", + Metadata: map[string]string{ + "foo": "bar", + }, + } + + container, err := containers.Create(objectStorageClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Container + + containerName := "my_container" + + updateOpts := containers.UpdateOpts{ + Metadata: map[string]string{ + "bar": "baz", + }, + RemoveMetadata: []string{ + "foo", + }, + } + + container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Container + + containerName := "my_container" + + container, err := containers.Delete(objectStorageClient, containerName).Extract() + if err != nil { + panic(err) + } +*/ +package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go new file mode 100644 index 000000000000..ca99bb2a6aae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -0,0 +1,231 @@ +package containers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToContainerListParams() (bool, string, error) +} + +// ListOpts is a structure that holds options for listing containers. +type ListOpts struct { + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` +} + +// ToContainerListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each container. +func (opts ListOpts) ToContainerListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves containers associated with the account as +// well as account metadata. It returns a pager which can be iterated with the +// EachPage function. 
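+//
+// A minimal sketch of consuming the pager (assuming c is a configured
+// *gophercloud.ServiceClient):
+//
+//  err := List(c, ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) {
+//      infos, err := ExtractInfo(page)
+//      // ... use infos ...
+//      return true, err
+//  })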
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c) + if opts != nil { + full, query, err := opts.ToContainerListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ContainerPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToContainerCreateMap() (map[string]string, error) +} + +// CreateOpts is a structure that holds parameters for creating a container. +type CreateOpts struct { + Metadata map[string]string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + IfNoneMatch string `h:"If-None-Match"` + VersionsLocation string `h:"X-Versions-Location"` + HistoryLocation string `h:"X-History-Location"` +} + +// ToContainerCreateMap formats a CreateOpts into a map of headers. +func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Create is a function that creates a new container. +func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerCreateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("PUT", createURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + resp.Body.Close() + } + r.Err = err + return +} + +// Delete is a function that deletes a container. +func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, containerName), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToContainerUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting a container's metadata. +type UpdateOpts struct { + Metadata map[string]string + RemoveMetadata []string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + RemoveVersionsLocation string `h:"X-Remove-Versions-Location"` + VersionsLocation string `h:"X-Versions-Location"` + RemoveHistoryLocation string `h:"X-Remove-History-Location"` + HistoryLocation string `h:"X-History-Location"` +} + +// ToContainerUpdateMap formats a UpdateOpts into a map of headers. 
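+//
+// Keys listed in RemoveMetadata are emitted as X-Remove-Container-Meta-*
+// headers, which the Object Storage service interprets as metadata deletions.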
+func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + + for _, k := range opts.RemoveMetadata { + h["X-Remove-Container-Meta-"+k] = "remove" + } + + return h, nil +} + +// Update is a function that creates, updates, or deletes a container's +// metadata. +func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the Get +// request. +type GetOptsBuilder interface { + ToContainerGetMap() (map[string]string, error) +} + +// GetOpts is a structure that holds options for listing containers. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToContainerGetMap formats a GetOpts into a map of headers. +func (opts GetOpts) ToContainerGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves the metadata of a container. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName string, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerGetMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Head(getURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go new file mode 100644 index 000000000000..cce2190ff9ff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go @@ -0,0 +1,344 @@ +package containers + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Container represents a container resource. +type Container struct { + // The total number of bytes stored in the container. + Bytes int64 `json:"bytes"` + + // The total number of objects stored in the container. + Count int64 `json:"count"` + + // The name of the container. + Name string `json:"name"` +} + +// ContainerPage is the page returned by a pager when traversing over a +// collection of containers. +type ContainerPage struct { + pagination.MarkerPageBase +} + +//IsEmpty returns true if a ListResult contains no container names. +func (r ContainerPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last container name in a ListResult. 
+func (r ContainerPage) LastMarker() (string, error) { + names, err := ExtractNames(r) + if err != nil { + return "", err + } + if len(names) == 0 { + return "", nil + } + return names[len(names)-1], nil +} + +// ExtractInfo is a function that takes a ListResult and returns the +// containers' information. +func ExtractInfo(r pagination.Page) ([]Container, error) { + var s []Container + err := (r.(ContainerPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a ListResult and returns the +// containers' names. +func ExtractNames(page pagination.Page) ([]string, error) { + casted := page.(ContainerPage) + ct := casted.Header.Get("Content-Type") + + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(page) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, container := range parsed { + names = append(names, container.Name) + } + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(page.(ContainerPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + BytesUsed int64 `json:"-"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ObjectCount int64 `json:"-"` + Read []string `json:"-"` + TransID string `json:"X-Trans-Id"` + VersionsLocation string `json:"X-Versions-Location"` + HistoryLocation string `json:"X-History-Location"` + Write []string `json:"-"` + StoragePolicy string `json:"X-Storage-Policy"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Container-Bytes-Used"` + ContentLength string `json:"Content-Length"` + ObjectCount string `json:"X-Container-Object-Count"` + Write string `json:"X-Container-Write"` + Read string `json:"X-Container-Read"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + r.Read = strings.Split(s.Read, ",") + r.Write = strings.Split(s.Write, ",") + + r.Date = time.Time(s.Date) + + return err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the container. 
+func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Container-Meta-") { + key := strings.TrimPrefix(k, "X-Container-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a Create +// request. +type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// CreateResult represents the result of a create operation. To extract the +// the headers from the HTTP response, call its Extract method. +type CreateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. +// To extract the headers from the HTTP response, call its Extract method. +func (r CreateResult) Extract() (*CreateHeader, error) { + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a Update +// request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// UpdateResult represents the result of an update operation. To extract the +// the headers from the HTTP response, call its Extract method. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a Delete +// request. 
+type DeleteHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DeleteHeader) UnmarshalJSON(b []byte) error { + type tmp DeleteHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DeleteHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// DeleteResult represents the result of a delete operation. To extract the +// the headers from the HTTP response, call its Extract method. +type DeleteResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Delete. +func (r DeleteResult) Extract() (*DeleteHeader, error) { + var s *DeleteHeader + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go new file mode 100644 index 000000000000..9b380470dd78 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go @@ -0,0 +1,23 @@ +package containers + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func createURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func getURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func deleteURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func updateURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go new file mode 100644 index 000000000000..e9b4b8a9f321 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go @@ -0,0 +1,106 @@ +/* +Package objects contains functionality for working with Object Storage +object resources. An object is a resource that represents and contains data +- such as documents, images, and so on. You can also store custom metadata +with an object. + +Note: When referencing the Object Storage API docs, some of the API actions +are listed under "containers" rather than "objects". This was an intentional +design in Gophercloud to make some object actions feel more natural. 
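+
+Example to Create a Temporary URL (an illustrative sketch; it assumes the
+account already has a Temp-URL secret key set via accounts.UpdateOpts)
+
+ objectName := "my_object"
+ containerName := "my_container"
+
+ tempURL, err := objects.CreateTempURL(objectStorageClient, containerName, objectName, objects.CreateTempURLOpts{
+ Method: objects.GET,
+ TTL: 60,
+ })
+ if err != nil {
+ panic(err)
+ }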
+
+Example to List Objects
+
+ containerName := "my_container"
+
+ listOpts := objects.ListOpts{
+ Full: true,
+ }
+
+ allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
+ if err != nil {
+ panic(err)
+ }
+
+ allObjects, err := objects.ExtractInfo(allPages)
+ if err != nil {
+ panic(err)
+ }
+
+ for _, object := range allObjects {
+ fmt.Printf("%+v\n", object)
+ }
+
+Example to List Object Names
+
+ containerName := "my_container"
+
+ listOpts := objects.ListOpts{
+ Full: false,
+ }
+
+ allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
+ if err != nil {
+ panic(err)
+ }
+
+ allObjects, err := objects.ExtractNames(allPages)
+ if err != nil {
+ panic(err)
+ }
+
+ for _, object := range allObjects {
+ fmt.Printf("%+v\n", object)
+ }
+
+Example to Create an Object
+
+ content := "some object content"
+ objectName := "my_object"
+ containerName := "my_container"
+
+ createOpts := objects.CreateOpts{
+ ContentType: "text/plain",
+ Content: strings.NewReader(content),
+ }
+
+ object, err := objects.Create(objectStorageClient, containerName, objectName, createOpts).Extract()
+ if err != nil {
+ panic(err)
+ }
+
+Example to Copy an Object
+
+ objectName := "my_object"
+ containerName := "my_container"
+
+ copyOpts := objects.CopyOpts{
+ Destination: "/newContainer/newObject",
+ }
+
+ object, err := objects.Copy(objectStorageClient, containerName, objectName, copyOpts).Extract()
+ if err != nil {
+ panic(err)
+ }
+
+Example to Delete an Object
+
+ objectName := "my_object"
+ containerName := "my_container"
+
+ object, err := objects.Delete(objectStorageClient, containerName, objectName, nil).Extract()
+ if err != nil {
+ panic(err)
+ }
+
+Example to Download an Object's Data
+
+ objectName := "my_object"
+ containerName := "my_container"
+
+ object := objects.Download(objectStorageClient, containerName, objectName, nil)
+ content, err := object.ExtractContent()
+ if err != nil {
+ panic(err)
+ }
+*/
+package objects
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
new file mode 100644
index 000000000000..5c4ae44d3176
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
@@ -0,0 +1,13 @@
+package objects
+
+import "github.com/gophercloud/gophercloud"
+
+// ErrWrongChecksum is the error when the checksum generated for an object
+// doesn't match the ETag header.
+type ErrWrongChecksum struct {
+ gophercloud.BaseError
+}
+
+func (e ErrWrongChecksum) Error() string {
+ return "Local checksum does not match API ETag header"
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
new file mode 100644
index 000000000000..7325cd7d0b9b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
@@ -0,0 +1,499 @@
+package objects
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts"
+ "github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface { + ToObjectListParams() (bool, string, error) +} + +// ListOpts is a structure that holds parameters for listing objects. +type ListOpts struct { + // Full is a true/false value that represents the amount of object information + // returned. If Full is set to true, then the content-type, number of bytes, + // hash date last modified, and name are returned. If set to false or not set, + // then only the object names are returned. + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` + Path string `q:"path"` +} + +// ToObjectListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each object. +func (opts ListOpts) ToObjectListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves all objects in a container. It also returns +// the details for the container. To extract only the object information or names, +// pass the ListResult response to the ExtractInfo or ExtractNames function, +// respectively. +func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c, containerName) + if opts != nil { + full, query, err := opts.ToObjectListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// DownloadOptsBuilder allows extensions to add additional parameters to the +// Download request. +type DownloadOptsBuilder interface { + ToObjectDownloadParams() (map[string]string, string, error) +} + +// DownloadOpts is a structure that holds parameters for downloading an object. +type DownloadOpts struct { + IfMatch string `h:"If-Match"` + IfModifiedSince time.Time `h:"If-Modified-Since"` + IfNoneMatch string `h:"If-None-Match"` + IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` + Newest bool `h:"X-Newest"` + Range string `h:"Range"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectDownloadParams formats a DownloadOpts into a query string and map of +// headers. +func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Download is a function that retrieves the content and metadata for an object. +// To extract just the content, pass the DownloadResult response to the +// ExtractContent function. 
+func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) (r DownloadResult) { + url := downloadURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectDownloadParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 206, 304}, + }) + if resp != nil { + r.Header = resp.Header + r.Body = resp.Body + } + r.Err = err + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToObjectCreateParams() (io.Reader, map[string]string, string, error) +} + +// CreateOpts is a structure that holds parameters for creating an object. +type CreateOpts struct { + Content io.Reader + Metadata map[string]string + NoETag bool + CacheControl string `h:"Cache-Control"` + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentLength int64 `h:"Content-Length"` + ContentType string `h:"Content-Type"` + CopyFrom string `h:"X-Copy-From"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType string `h:"X-Detect-Content-Type"` + ETag string `h:"ETag"` + IfNoneMatch string `h:"If-None-Match"` + ObjectManifest string `h:"X-Object-Manifest"` + TransferEncoding string `h:"Transfer-Encoding"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectCreateParams formats a CreateOpts into a query string and map of +// headers. +func (opts CreateOpts) ToObjectCreateParams() (io.Reader, map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, nil, "", err + } + + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + + if opts.NoETag { + delete(h, "etag") + return opts.Content, h, q.String(), nil + } + + if h["ETag"] != "" { + return opts.Content, h, q.String(), nil + } + + // When we're dealing with big files an io.ReadSeeker allows us to efficiently calculate + // the md5 sum. An io.Reader is only readable once which means we have to copy the entire + // file content into memory first. + readSeeker, isReadSeeker := opts.Content.(io.ReadSeeker) + if !isReadSeeker { + data, err := ioutil.ReadAll(opts.Content) + if err != nil { + return nil, nil, "", err + } + readSeeker = bytes.NewReader(data) + } + + hash := md5.New() + // io.Copy into md5 is very efficient as it's done in small chunks. + if _, err := io.Copy(hash, readSeeker); err != nil { + return nil, nil, "", err + } + readSeeker.Seek(0, io.SeekStart) + + h["ETag"] = fmt.Sprintf("%x", hash.Sum(nil)) + + return readSeeker, h, q.String(), nil +} + +// Create is a function that creates a new object or replaces an existing +// object. If the returned response's ETag header fails to match the local +// checksum, the failed request will automatically be retried up to a maximum +// of 3 times. 
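+//
+// A minimal sketch of a create call (assuming objectStorageClient is an
+// already-configured service client); the ETag header is computed from Content
+// by ToObjectCreateParams unless NoETag or an explicit ETag is supplied:
+//
+//  header, err := Create(objectStorageClient, "my_container", "my_object", CreateOpts{
+//      Content:     strings.NewReader("hello"),
+//      ContentType: "text/plain",
+//  }).Extract()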
+func Create(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateOptsBuilder) (r CreateResult) { + url := createURL(c, containerName, objectName) + h := make(map[string]string) + var b io.Reader + if opts != nil { + tmpB, headers, query, err := opts.ToObjectCreateParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + b = tmpB + } + + resp, err := c.Put(url, nil, nil, &gophercloud.RequestOpts{ + RawBody: b, + MoreHeaders: h, + }) + r.Err = err + if resp != nil { + r.Header = resp.Header + } + return +} + +// CopyOptsBuilder allows extensions to add additional parameters to the +// Copy request. +type CopyOptsBuilder interface { + ToObjectCopyMap() (map[string]string, error) +} + +// CopyOpts is a structure that holds parameters for copying one object to +// another. +type CopyOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + Destination string `h:"Destination" required:"true"` +} + +// ToObjectCopyMap formats a CopyOpts into a map of headers. +func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Copy is a function that copies one object to another. +func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) (r CopyResult) { + h := make(map[string]string) + headers, err := opts.ToObjectCopyMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + + url := copyURL(c, containerName, objectName) + resp, err := c.Request("COPY", url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToObjectDeleteQuery() (string, error) +} + +// DeleteOpts is a structure that holds parameters for deleting an object. +type DeleteOpts struct { + MultipartManifest string `q:"multipart-manifest"` +} + +// ToObjectDeleteQuery formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete is a function that deletes an object. +func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) (r DeleteResult) { + url := deleteURL(c, containerName, objectName) + if opts != nil { + query, err := opts.ToObjectDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + resp, err := c.Delete(url, nil) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the +// Get request. +type GetOptsBuilder interface { + ToObjectGetParams() (map[string]string, string, error) +} + +// GetOpts is a structure that holds parameters for getting an object's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` + Expires string `q:"expires"` + Signature string `q:"signature"` +} + +// ToObjectGetParams formats a GetOpts into a query string and a map of headers. 
+func (opts GetOpts) ToObjectGetParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Get is a function that retrieves the metadata of an object. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) (r GetResult) { + url := getURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectGetParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Head(url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToObjectUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting an object's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType bool `h:"X-Detect-Content-Type"` +} + +// ToObjectUpdateMap formats a UpdateOpts into a map of headers. +func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes an object's metadata. +func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToObjectUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + url := updateURL(c, containerName, objectName) + resp, err := c.Post(url, nil, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// HTTPMethod represents an HTTP method string (e.g. "GET"). +type HTTPMethod string + +var ( + // GET represents an HTTP "GET" method. + GET HTTPMethod = "GET" + + // POST represents an HTTP "POST" method. + POST HTTPMethod = "POST" +) + +// CreateTempURLOpts are options for creating a temporary URL for an object. +type CreateTempURLOpts struct { + // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. + // Valid values are "GET" and "POST". + Method HTTPMethod + + // (REQUIRED) TTL is the number of seconds the temp URL should be active. + TTL int + + // (Optional) Split is the string on which to split the object URL. Since only + // the object path is used in the hash, the object URL needs to be parsed. If + // empty, the default OpenStack URL split point will be used ("/v1/"). + Split string +} + +// CreateTempURL is a function for creating a temporary URL for an object. 
It +// allows users to have "GET" or "POST" access to a particular tenant's object +// for a limited amount of time. +func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { + if opts.Split == "" { + opts.Split = "/v1/" + } + duration := time.Duration(opts.TTL) * time.Second + expiry := time.Now().Add(duration).Unix() + getHeader, err := accounts.Get(c, nil).Extract() + if err != nil { + return "", err + } + secretKey := []byte(getHeader.TempURLKey) + url := getURL(c, containerName, objectName) + splitPath := strings.Split(url, opts.Split) + baseURL, objectPath := splitPath[0], splitPath[1] + objectPath = opts.Split + objectPath + body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) + hash := hmac.New(sha1.New, secretKey) + hash.Write([]byte(body)) + hexsum := fmt.Sprintf("%x", hash.Sum(nil)) + return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go new file mode 100644 index 000000000000..dd7c7044d0e3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go @@ -0,0 +1,580 @@ +package objects + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Object is a structure that holds information related to a storage object. +type Object struct { + // Bytes is the total number of bytes that comprise the object. + Bytes int64 `json:"bytes"` + + // ContentType is the content type of the object. + ContentType string `json:"content_type"` + + // Hash represents the MD5 checksum value of the object's content. + Hash string `json:"hash"` + + // LastModified is the time the object was last modified. + LastModified time.Time `json:"-"` + + // Name is the unique name for the object. + Name string `json:"name"` + + // Subdir denotes if the result contains a subdir. + Subdir string `json:"subdir"` +} + +func (r *Object) UnmarshalJSON(b []byte) error { + type tmp Object + var s *struct { + tmp + LastModified string `json:"last_modified"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Object(s.tmp) + + if s.LastModified != "" { + t, err := time.Parse(gophercloud.RFC3339MilliNoZ, s.LastModified) + if err != nil { + t, err = time.Parse(gophercloud.RFC3339Milli, s.LastModified) + if err != nil { + return err + } + } + r.LastModified = t + } + + return nil +} + +// ObjectPage is a single page of objects that is returned from a call to the +// List function. +type ObjectPage struct { + pagination.MarkerPageBase +} + +// IsEmpty returns true if a ListResult contains no object names. +func (r ObjectPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last object name in a ListResult. +func (r ObjectPage) LastMarker() (string, error) { + return extractLastMarker(r) +} + +// ExtractInfo is a function that takes a page of objects and returns their +// full information. +func ExtractInfo(r pagination.Page) ([]Object, error) { + var s []Object + err := (r.(ObjectPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a page of objects and returns only +// their names. 
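+//
+// When the listing used a delimiter, pseudo-directory entries carry their path
+// in Subdir rather than Name; ExtractNames returns whichever is set.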
+func ExtractNames(r pagination.Page) ([]string, error) { + casted := r.(ObjectPage) + ct := casted.Header.Get("Content-Type") + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(r) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, object := range parsed { + if object.Subdir != "" { + names = append(names, object.Subdir) + } else { + names = append(names, object.Name) + } + } + + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(r.(ObjectPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + case strings.HasPrefix(ct, "text/html"): + return []string{}, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// DownloadHeader represents the headers returned in the response from a +// Download request. +type DownloadHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DownloadHeader) UnmarshalJSON(b []byte) error { + type tmp DownloadHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + StaticLargeObject interface{} `json:"X-Static-Large-Object"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DownloadHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch t := s.StaticLargeObject.(type) { + case string: + if t == "True" || t == "true" { + r.StaticLargeObject = true + } + case bool: + r.StaticLargeObject = t + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// DownloadResult is a *http.Response that is returned from a call to the +// Download function. +type DownloadResult struct { + gophercloud.HeaderResult + Body io.ReadCloser +} + +// Extract will return a struct of headers returned from a call to Download. +func (r DownloadResult) Extract() (*DownloadHeader, error) { + var s *DownloadHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractContent is a function that takes a DownloadResult's io.Reader body +// and reads all available data into a slice of bytes. Please be aware that due +// the nature of io.Reader is forward-only - meaning that it can only be read +// once and not rewound. 
You can recreate a reader from the output of this +// function by using bytes.NewReader(downloadBytes) +func (r *DownloadResult) ExtractContent() ([]byte, error) { + if r.Err != nil { + return nil, r.Err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + r.Body.Close() + return body, nil +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + StaticLargeObject interface{} `json:"X-Static-Large-Object"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch t := s.StaticLargeObject.(type) { + case string: + if t == "True" || t == "true" { + r.StaticLargeObject = true + } + case bool: + r.StaticLargeObject = t + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// GetResult is a *http.Response that is returned from a call to the Get +// function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the object. +func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Object-Meta-") { + key := strings.TrimPrefix(k, "X-Object-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a +// Create request. 
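+//
+// A sketch of how it is usually obtained (client and createOpts are
+// assumed to exist):
+//
+//	header, err := Create(client, "my-container", "my-object", createOpts).Extract()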
+type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + checksum string + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. +func (r CreateResult) Extract() (*CreateHeader, error) { + //if r.Header.Get("ETag") != fmt.Sprintf("%x", localChecksum) { + // return nil, ErrWrongChecksum{} + //} + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a +// Update request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a +// Delete request. +type DeleteHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DeleteHeader) UnmarshalJSON(b []byte) error { + type tmp DeleteHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DeleteHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Delete. 
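+//
+// For example (sketch; client is an assumed *gophercloud.ServiceClient):
+//
+//	header, err := Delete(client, "my-container", "my-object", nil).Extract()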
+func (r DeleteResult) Extract() (*DeleteHeader, error) { + var s *DeleteHeader + err := r.ExtractInto(&s) + return s, err +} + +// CopyHeader represents the headers returned in the response from a +// Copy request. +type CopyHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + CopiedFrom string `json:"X-Copied-From"` + CopiedFromLastModified time.Time `json:"-"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CopyHeader) UnmarshalJSON(b []byte) error { + type tmp CopyHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CopyHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CopyResult represents the result of a copy operation. +type CopyResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Copy. +func (r CopyResult) Extract() (*CopyHeader, error) { + var s *CopyHeader + err := r.ExtractInto(&s) + return s, err +} + +// extractLastMarker is a function that takes a page of objects and returns the +// marker for the page. This can either be a subdir or the last object's name. +func extractLastMarker(r pagination.Page) (string, error) { + casted := r.(ObjectPage) + + // If a delimiter was requested, check if a subdir exists. 
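+	// (When a delimiter is used, Swift reports pseudo-directories whose only
+	// populated field is Subdir, so the marker may need to come from Subdir
+	// rather than Name; see the JSON branch below.)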
+ queryParams, err := url.ParseQuery(casted.URL.RawQuery) + if err != nil { + return "", err + } + + var delimeter bool + if v, ok := queryParams["delimiter"]; ok && len(v) > 0 { + delimeter = true + } + + ct := casted.Header.Get("Content-Type") + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(r) + if err != nil { + return "", err + } + + var lastObject Object + if len(parsed) > 0 { + lastObject = parsed[len(parsed)-1] + } + + if !delimeter { + return lastObject.Name, nil + } + + if lastObject.Name != "" { + return lastObject.Name, nil + } + + return lastObject.Subdir, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(r.(ObjectPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names[len(names)-1], err + case strings.HasPrefix(ct, "text/html"): + return "", nil + default: + return "", fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go new file mode 100644 index 000000000000..b3ac304b7428 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go @@ -0,0 +1,33 @@ +package objects + +import ( + "github.com/gophercloud/gophercloud" +) + +func listURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func copyURL(c *gophercloud.ServiceClient, container, object string) string { + return c.ServiceURL(container, object) +} + +func createURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func getURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func deleteURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func downloadURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func updateURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} diff --git a/vendor/github.com/ncw/swift/.gitignore b/vendor/github.com/ncw/swift/.gitignore new file mode 100644 index 000000000000..5cdbab794776 --- /dev/null +++ b/vendor/github.com/ncw/swift/.gitignore @@ -0,0 +1,4 @@ +*~ +*.pyc +test-env* +junk/ \ No newline at end of file diff --git a/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/ncw/swift/.travis.yml new file mode 100644 index 000000000000..e0a61643b0d8 --- /dev/null +++ b/vendor/github.com/ncw/swift/.travis.yml @@ -0,0 +1,33 @@ +language: go +sudo: false + +go: + - 1.2.x + - 1.3.x + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +matrix: + include: + - go: 1.12.x + env: TEST_REAL_SERVER=rackspace + - go: 1.12.x + env: TEST_REAL_SERVER=memset + allow_failures: + - go: 1.12.x + env: TEST_REAL_SERVER=rackspace + - go: 1.12.x + env: TEST_REAL_SERVER=memset +install: go test -i ./... 
+script: + - test -z "$(go fmt ./...)" + - go test + - ./travis_realserver.sh diff --git a/vendor/github.com/ncw/swift/COPYING b/vendor/github.com/ncw/swift/COPYING new file mode 100644 index 000000000000..8c27c67fd0a1 --- /dev/null +++ b/vendor/github.com/ncw/swift/COPYING @@ -0,0 +1,20 @@ +Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md new file mode 100644 index 000000000000..838ec623e947 --- /dev/null +++ b/vendor/github.com/ncw/swift/README.md @@ -0,0 +1,161 @@ +Swift +===== + +This package provides an easy to use library for interfacing with +Swift / Openstack Object Storage / Rackspace cloud files from the Go +Language + +See here for package docs + + http://godoc.org/github.com/ncw/swift + +[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift) + +Install +------- + +Use go to install the library + + go get github.com/ncw/swift + +Usage +----- + +See here for full package docs + +- http://godoc.org/github.com/ncw/swift + +Here is a short example from the docs +```go +import "github.com/ncw/swift" + +// Create a connection +c := swift.Connection{ + UserName: "user", + ApiKey: "key", + AuthUrl: "auth_url", + Domain: "domain", // Name of the domain (v3 auth only) + Tenant: "tenant", // Name of the tenant (v2 auth only) +} +// Authenticate +err := c.Authenticate() +if err != nil { + panic(err) +} +// List all the containers +containers, err := c.ContainerNames(nil) +fmt.Println(containers) +// etc... +``` + +Additions +--------- + +The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface. + +Testing +------- + +To run the tests you can either use an embedded fake Swift server +either use a real Openstack Swift server or a Rackspace Cloud files account. 
+ +When using a real Swift server, you need to set these environment variables +before running the tests + + export SWIFT_API_USER='user' + export SWIFT_API_KEY='key' + export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' + +And optionally these if using v2 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + +And optionally these if using v3 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + export SWIFT_API_DOMAIN_ID='domain id' + export SWIFT_API_DOMAIN='domain name' + +And optionally these if using v3 trust + + export SWIFT_TRUST_ID='TrustId' + +And optionally this if you want to skip server certificate validation + + export SWIFT_AUTH_INSECURE=1 + +And optionally this to configure the connect channel timeout, in seconds + + export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 + +And optionally this to configure the data channel timeout, in seconds + + export SWIFT_DATA_CHANNEL_TIMEOUT=60 + +Then run the tests with `go test` + +License +------- + +This is free software under the terms of MIT license (check COPYING file +included in this package). + +Contact and support +------------------- + +The project website is at: + +- https://github.com/ncw/swift + +There you can file bug reports, ask for help or contribute patches. + +Authors +------- + +- Nick Craig-Wood + +Contributors +------------ + +- Brian "bojo" Jones +- Janika Liiv +- Yamamoto, Hirotaka +- Stephen +- platformpurple +- Paul Querna +- Livio Soares +- thesyncim +- lsowen +- Sylvain Baubeau +- Chris Kastorff +- Dai HaoJun +- Hua Wang +- Fabian Ruff +- Arturo Reuschenbach Puncernau +- Petr Kotek +- Stefan Majewsky +- Cezar Sa Espinola +- Sam Gunaratne +- Richard Scothern +- Michel Couillard +- Christopher Waldon +- dennis +- hag +- Alexander Neumann +- eclipseo <30413512+eclipseo@users.noreply.github.com> +- Yuri Per +- Falk Reimann +- Arthur Paim Arnold +- Bruno Michel +- Charles Hsu +- Omar Ali +- Andreas Andersen +- kayrus +- CodeLingo Bot +- Jérémy Clerc +- 4xicom <37339705+4xicom@users.noreply.github.com> +- Bo +- Thiago da Silva +- Brandon WELSCH diff --git a/vendor/github.com/ncw/swift/auth.go b/vendor/github.com/ncw/swift/auth.go new file mode 100644 index 000000000000..25654f429cb1 --- /dev/null +++ b/vendor/github.com/ncw/swift/auth.go @@ -0,0 +1,335 @@ +package swift + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "strings" + "time" +) + +// Auth defines the operations needed to authenticate with swift +// +// This encapsulates the different authentication schemes in use +type Authenticator interface { + // Request creates an http.Request for the auth - return nil if not needed + Request(*Connection) (*http.Request, error) + // Response parses the http.Response + Response(resp *http.Response) error + // The public storage URL - set Internal to true to read + // internal/service net URL + StorageUrl(Internal bool) string + // The access token + Token() string + // The CDN url if available + CdnUrl() string +} + +// Expireser is an optional interface to read the expiration time of the token +type Expireser interface { + Expires() time.Time +} + +type CustomEndpointAuthenticator interface { + StorageUrlForEndpoint(endpointType EndpointType) string +} + +type EndpointType string + +const ( + // Use public URL as storage URL + EndpointTypePublic = EndpointType("public") + + // Use internal URL as storage URL + EndpointTypeInternal = EndpointType("internal") + + // Use admin URL as storage URL + EndpointTypeAdmin = 
EndpointType("admin") +) + +// newAuth - create a new Authenticator from the AuthUrl +// +// A hint for AuthVersion can be provided +func newAuth(c *Connection) (Authenticator, error) { + AuthVersion := c.AuthVersion + if AuthVersion == 0 { + if strings.Contains(c.AuthUrl, "v3") { + AuthVersion = 3 + } else if strings.Contains(c.AuthUrl, "v2") { + AuthVersion = 2 + } else if strings.Contains(c.AuthUrl, "v1") { + AuthVersion = 1 + } else { + return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") + } + } + switch AuthVersion { + case 1: + return &v1Auth{}, nil + case 2: + return &v2Auth{ + // Guess as to whether using API key or + // password it will try both eventually so + // this is just an optimization. + useApiKey: len(c.ApiKey) >= 32, + }, nil + case 3: + return &v3Auth{}, nil + } + return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) +} + +// ------------------------------------------------------------ + +// v1 auth +type v1Auth struct { + Headers http.Header // V1 auth: the authentication headers so extensions can access them +} + +// v1 Authentication - make request +func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { + req, err := http.NewRequest("GET", c.AuthUrl, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.UserAgent) + req.Header.Set("X-Auth-Key", c.ApiKey) + req.Header.Set("X-Auth-User", c.UserName) + return req, nil +} + +// v1 Authentication - read response +func (auth *v1Auth) Response(resp *http.Response) error { + auth.Headers = resp.Header + return nil +} + +// v1 Authentication - read storage url +func (auth *v1Auth) StorageUrl(Internal bool) string { + storageUrl := auth.Headers.Get("X-Storage-Url") + if Internal { + newUrl, err := url.Parse(storageUrl) + if err != nil { + return storageUrl + } + newUrl.Host = "snet-" + newUrl.Host + storageUrl = newUrl.String() + } + return storageUrl +} + +// v1 Authentication - read auth token +func (auth *v1Auth) Token() string { + return auth.Headers.Get("X-Auth-Token") +} + +// v1 Authentication - read cdn url +func (auth *v1Auth) CdnUrl() string { + return auth.Headers.Get("X-CDN-Management-Url") +} + +// ------------------------------------------------------------ + +// v2 Authentication +type v2Auth struct { + Auth *v2AuthResponse + Region string + useApiKey bool // if set will use API key not Password + useApiKeyOk bool // if set won't change useApiKey any more + notFirst bool // set after first run +} + +// v2 Authentication - make request +func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + // Toggle useApiKey if not first run and not OK yet + if auth.notFirst && !auth.useApiKeyOk { + auth.useApiKey = !auth.useApiKey + } + auth.notFirst = true + // Create a V2 auth request for the body of the connection + var v2i interface{} + if !auth.useApiKey { + // Normal swift authentication + v2 := v2AuthRequest{} + v2.Auth.PasswordCredentials.UserName = c.UserName + v2.Auth.PasswordCredentials.Password = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } else { + // Rackspace special with API Key + v2 := v2AuthRequestRackspace{} + v2.Auth.ApiKeyCredentials.UserName = c.UserName + v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } + body, err := json.Marshal(v2i) + if err != nil { + return nil, err + } + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "tokens" + req, err 
:= http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", c.UserAgent) + return req, nil +} + +// v2 Authentication - read response +func (auth *v2Auth) Response(resp *http.Response) error { + auth.Auth = new(v2AuthResponse) + err := readJson(resp, auth.Auth) + // If successfully read Auth then no need to toggle useApiKey any more + if err == nil { + auth.useApiKeyOk = true + } + return err +} + +// Finds the Endpoint Url of "type" from the v2AuthResponse using the +// Region if set or defaulting to the first one if not +// +// Returns "" if not found +func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string { + for _, catalog := range auth.Auth.Access.ServiceCatalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if auth.Region == "" || (auth.Region == endpoint.Region) { + switch endpointType { + case EndpointTypeInternal: + return endpoint.InternalUrl + case EndpointTypePublic: + return endpoint.PublicUrl + case EndpointTypeAdmin: + return endpoint.AdminUrl + default: + return "" + } + } + } + } + } + return "" +} + +// v2 Authentication - read storage url +// +// If Internal is true then it reads the private (internal / service +// net) URL. +func (auth *v2Auth) StorageUrl(Internal bool) string { + endpointType := EndpointTypePublic + if Internal { + endpointType = EndpointTypeInternal + } + return auth.StorageUrlForEndpoint(endpointType) +} + +// v2 Authentication - read storage url +// +// Use the indicated endpointType to choose a URL. +func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string { + return auth.endpointUrl("object-store", endpointType) +} + +// v2 Authentication - read auth token +func (auth *v2Auth) Token() string { + return auth.Auth.Access.Token.Id +} + +// v2 Authentication - read expires +func (auth *v2Auth) Expires() time.Time { + t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires) + if err != nil { + return time.Time{} // return Zero if not parsed + } + return t +} + +// v2 Authentication - read cdn url +func (auth *v2Auth) CdnUrl() string { + return auth.endpointUrl("rax:object-cdn", EndpointTypePublic) +} + +// ------------------------------------------------------------ + +// V2 Authentication request +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequest struct { + Auth struct { + PasswordCredentials struct { + UserName string `json:"username"` + Password string `json:"password"` + } `json:"passwordCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } `json:"auth"` +} + +// V2 Authentication request - Rackspace variant +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequestRackspace struct { + Auth struct { + ApiKeyCredentials struct { + UserName string `json:"username"` + ApiKey string `json:"apiKey"` + } `json:"RAX-KSKEY:apiKeyCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } 
`json:"auth"` +} + +// V2 Authentication reply +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthResponse struct { + Access struct { + ServiceCatalog []struct { + Endpoints []struct { + InternalUrl string + PublicUrl string + AdminUrl string + Region string + TenantId string + } + Name string + Type string + } + Token struct { + Expires string + Id string + Tenant struct { + Id string + Name string + } + } + User struct { + DefaultRegion string `json:"RAX-AUTH:defaultRegion"` + Id string + Name string + Roles []struct { + Description string + Id string + Name string + TenantId string + } + } + } +} diff --git a/vendor/github.com/ncw/swift/auth_v3.go b/vendor/github.com/ncw/swift/auth_v3.go new file mode 100644 index 000000000000..1e34ad81464e --- /dev/null +++ b/vendor/github.com/ncw/swift/auth_v3.go @@ -0,0 +1,300 @@ +package swift + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + v3AuthMethodToken = "token" + v3AuthMethodPassword = "password" + v3AuthMethodApplicationCredential = "application_credential" + v3CatalogTypeObjectStore = "object-store" +) + +// V3 Authentication request +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://developer.openstack.org/api-ref-identity-v3.html +type v3AuthRequest struct { + Auth struct { + Identity struct { + Methods []string `json:"methods"` + Password *v3AuthPassword `json:"password,omitempty"` + Token *v3AuthToken `json:"token,omitempty"` + ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"` + } `json:"identity"` + Scope *v3Scope `json:"scope,omitempty"` + } `json:"auth"` +} + +type v3Scope struct { + Project *v3Project `json:"project,omitempty"` + Domain *v3Domain `json:"domain,omitempty"` + Trust *v3Trust `json:"OS-TRUST:trust,omitempty"` +} + +type v3Domain struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +type v3Project struct { + Name string `json:"name,omitempty"` + Id string `json:"id,omitempty"` + Domain *v3Domain `json:"domain,omitempty"` +} + +type v3Trust struct { + Id string `json:"id"` +} + +type v3User struct { + Domain *v3Domain `json:"domain,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Password string `json:"password,omitempty"` +} + +type v3AuthToken struct { + Id string `json:"id"` +} + +type v3AuthPassword struct { + User v3User `json:"user"` +} + +type v3AuthApplicationCredential struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Secret string `json:"secret,omitempty"` + User *v3User `json:"user,omitempty"` +} + +// V3 Authentication response +type v3AuthResponse struct { + Token struct { + ExpiresAt string `json:"expires_at"` + IssuedAt string `json:"issued_at"` + Methods []string + Roles []struct { + Id, Name string + Links struct { + Self string + } + } + + Project struct { + Domain struct { + Id, Name string + } + Id, Name string + } + + Catalog []struct { + Id, Namem, Type string + Endpoints []struct { + Id, Region_Id, Url, Region string + Interface EndpointType + } + } + + User struct { + Id, Name string + Domain struct { + Id, Name string + Links struct { + Self string + } + } + } + + Audit_Ids []string + } +} + +type v3Auth struct { + Region string + Auth 
*v3AuthResponse + Headers http.Header +} + +func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + + var v3i interface{} + + v3 := v3AuthRequest{} + + if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" { + var user *v3User + + if c.ApplicationCredentialId != "" { + c.ApplicationCredentialName = "" + user = &v3User{} + } + + if user == nil && c.UserId != "" { + // UserID could be used without the domain information + user = &v3User{ + Id: c.UserId, + } + } + + if user == nil && c.UserName == "" { + // Make sure that Username or UserID are provided + return nil, fmt.Errorf("UserID or Name should be provided") + } + + if user == nil && c.DomainId != "" { + user = &v3User{ + Name: c.UserName, + Domain: &v3Domain{ + Id: c.DomainId, + }, + } + } + + if user == nil && c.Domain != "" { + user = &v3User{ + Name: c.UserName, + Domain: &v3Domain{ + Name: c.Domain, + }, + } + } + + // Make sure that DomainID or DomainName are provided among Username + if user == nil { + return nil, fmt.Errorf("DomainID or Domain should be provided") + } + + v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential} + v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{ + Id: c.ApplicationCredentialId, + Name: c.ApplicationCredentialName, + Secret: c.ApplicationCredentialSecret, + User: user, + } + } else if c.UserName == "" && c.UserId == "" { + v3.Auth.Identity.Methods = []string{v3AuthMethodToken} + v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} + } else { + v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} + v3.Auth.Identity.Password = &v3AuthPassword{ + User: v3User{ + Name: c.UserName, + Id: c.UserId, + Password: c.ApiKey, + }, + } + + var domain *v3Domain + + if c.Domain != "" { + domain = &v3Domain{Name: c.Domain} + } else if c.DomainId != "" { + domain = &v3Domain{Id: c.DomainId} + } + v3.Auth.Identity.Password.User.Domain = domain + } + + if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential { + if c.TrustId != "" { + v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}} + } else if c.TenantId != "" || c.Tenant != "" { + + v3.Auth.Scope = &v3Scope{Project: &v3Project{}} + + if c.TenantId != "" { + v3.Auth.Scope.Project.Id = c.TenantId + } else if c.Tenant != "" { + v3.Auth.Scope.Project.Name = c.Tenant + switch { + case c.TenantDomain != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain} + case c.TenantDomainId != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId} + case c.Domain != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain} + case c.DomainId != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId} + default: + v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"} + } + } + } + } + + v3i = v3 + + body, err := json.Marshal(v3i) + + if err != nil { + return nil, err + } + + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "auth/tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", c.UserAgent) + return req, nil +} + +func (auth *v3Auth) Response(resp *http.Response) error { + auth.Auth = &v3AuthResponse{} + auth.Headers = resp.Header + err := readJson(resp, auth.Auth) + return err +} + +func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string { + for _, catalog := range 
auth.Auth.Token.Catalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) { + return endpoint.Url + } + } + } + } + return "" +} + +func (auth *v3Auth) StorageUrl(Internal bool) string { + endpointType := EndpointTypePublic + if Internal { + endpointType = EndpointTypeInternal + } + return auth.StorageUrlForEndpoint(endpointType) +} + +func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string { + return auth.endpointUrl("object-store", endpointType) +} + +func (auth *v3Auth) Token() string { + return auth.Headers.Get("X-Subject-Token") +} + +func (auth *v3Auth) Expires() time.Time { + t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt) + if err != nil { + return time.Time{} // return Zero if not parsed + } + return t +} + +func (auth *v3Auth) CdnUrl() string { + return "" +} diff --git a/vendor/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/ncw/swift/compatibility_1_0.go new file mode 100644 index 000000000000..7b69a757a1c3 --- /dev/null +++ b/vendor/github.com/ncw/swift/compatibility_1_0.go @@ -0,0 +1,28 @@ +// Go 1.0 compatibility functions + +// +build !go1.1 + +package swift + +import ( + "log" + "net/http" + "time" +) + +// Cancel the request - doesn't work under < go 1.1 +func cancelRequest(transport http.RoundTripper, req *http.Request) { + log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") +} + +// Reset a timer - Doesn't work properly < go 1.1 +// +// This is quite hard to do properly under go < 1.1 so we do a crude +// approximation and hope that everyone upgrades to go 1.1 quickly +func resetTimer(t *time.Timer, d time.Duration) { + t.Stop() + // Very likely this doesn't actually work if we are already + // selecting on t.C. 
However we've stopped the original timer + // so won't break transfers but may not time them out :-( + *t = *time.NewTimer(d) +} diff --git a/vendor/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/ncw/swift/compatibility_1_1.go new file mode 100644 index 000000000000..a4f9c3ab2424 --- /dev/null +++ b/vendor/github.com/ncw/swift/compatibility_1_1.go @@ -0,0 +1,24 @@ +// Go 1.1 and later compatibility functions +// +// +build go1.1 + +package swift + +import ( + "net/http" + "time" +) + +// Cancel the request +func cancelRequest(transport http.RoundTripper, req *http.Request) { + if tr, ok := transport.(interface { + CancelRequest(*http.Request) + }); ok { + tr.CancelRequest(req) + } +} + +// Reset a timer +func resetTimer(t *time.Timer, d time.Duration) { + t.Reset(d) +} diff --git a/vendor/github.com/ncw/swift/compatibility_1_6.go b/vendor/github.com/ncw/swift/compatibility_1_6.go new file mode 100644 index 000000000000..b443d01d2a86 --- /dev/null +++ b/vendor/github.com/ncw/swift/compatibility_1_6.go @@ -0,0 +1,23 @@ +// +build go1.6 + +package swift + +import ( + "net/http" + "time" +) + +const IS_AT_LEAST_GO_16 = true + +func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) { + tr.ExpectContinueTimeout = t +} + +func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) { + if req.Body != nil { + req.Header.Add("Expect", "100-continue") + } + if !hasContentLength { + req.TransferEncoding = []string{"chunked"} + } +} diff --git a/vendor/github.com/ncw/swift/compatibility_not_1_6.go b/vendor/github.com/ncw/swift/compatibility_not_1_6.go new file mode 100644 index 000000000000..aabb44e2b77f --- /dev/null +++ b/vendor/github.com/ncw/swift/compatibility_not_1_6.go @@ -0,0 +1,13 @@ +// +build !go1.6 + +package swift + +import ( + "net/http" + "time" +) + +const IS_AT_LEAST_GO_16 = false + +func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {} +func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {} diff --git a/vendor/github.com/ncw/swift/dlo.go b/vendor/github.com/ncw/swift/dlo.go new file mode 100644 index 000000000000..05a1927b393e --- /dev/null +++ b/vendor/github.com/ncw/swift/dlo.go @@ -0,0 +1,149 @@ +package swift + +import ( + "os" + "strings" +) + +// DynamicLargeObjectCreateFile represents an open static large object +type DynamicLargeObjectCreateFile struct { + largeObjectCreateFile +} + +// DynamicLargeObjectCreateFile creates a dynamic large object +// returning an object which satisfies io.Writer, io.Seeker, io.Closer +// and io.ReaderFrom. The flags are as passes to the +// largeObjectCreate method. +func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { + lo, err := c.largeObjectCreate(opts) + if err != nil { + return nil, err + } + + return withBuffer(opts, &DynamicLargeObjectCreateFile{ + largeObjectCreateFile: *lo, + }), nil +} + +// DynamicLargeObjectCreate creates or truncates an existing dynamic +// large object returning a writeable object. This sets opts.Flags to +// an appropriate value before calling DynamicLargeObjectCreateFile +func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { + opts.Flags = os.O_TRUNC | os.O_CREATE + return c.DynamicLargeObjectCreateFile(opts) +} + +// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments. 
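+// It delegates to LargeObjectDelete, which removes both the manifest object
+// and every segment it references.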
+func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
+	return c.LargeObjectDelete(container, path)
+}
+
+// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName.
+func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
+	info, headers, err := c.Object(srcContainer, srcObjectName)
+	if err != nil {
+		return err
+	}
+
+	segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
+	if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType, sanitizeLargeObjectMoveHeaders(headers)); err != nil {
+		return err
+	}
+
+	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func sanitizeLargeObjectMoveHeaders(headers Headers) Headers {
+	sanitizedHeaders := make(map[string]string, len(headers))
+	for k, v := range headers {
+		if strings.HasPrefix(k, "X-") { // Some of the fields do not affect the request, e.g. X-Timestamp, X-Trans-Id, X-Openstack-Request-Id; OpenStack will generate new ones anyway.
+			sanitizedHeaders[k] = v
+		}
+	}
+	return sanitizedHeaders
+}
+
+// createDLOManifest creates a dynamic large object manifest
+func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string, headers Headers) error {
+	if headers == nil {
+		headers = make(Headers)
+	}
+	headers["X-Object-Manifest"] = prefix
+	manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
+	if err != nil {
+		return err
+	}
+
+	if err := manifest.Close(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close satisfies the io.Closer interface
+func (file *DynamicLargeObjectCreateFile) Close() error {
+	return file.Flush()
+}
+
+func (file *DynamicLargeObjectCreateFile) Flush() error {
+	err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType, file.headers)
+	if err != nil {
+		return err
+	}
+	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
+}
+
+func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
+	//a simple container listing works 99.9% of the time
+	segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
+	if err != nil {
+		return nil, err
+	}
+
+	hasObjectName := make(map[string]struct{})
+	for _, segment := range segments {
+		hasObjectName[segment.Name] = struct{}{}
+	}
+
+	//The container listing might be outdated (i.e. not contain all existing
+	//segment objects yet) because of temporary inconsistency (Swift is only
+	//eventually consistent!). Check its completeness.
+	segmentNumber := 0
+	for {
+		segmentNumber++
+		segmentName := getSegment(segmentPath, segmentNumber)
+		if _, seen := hasObjectName[segmentName]; seen {
+			continue
+		}
+
+		//This segment is missing in the container listing. Use a more reliable
+		//request to check its existence. (HEAD requests on segments are
+		//guaranteed to return the correct metadata, except for the pathological
+		//case of an outage of large parts of the Swift cluster or its network,
+		//since every segment is only written once.)
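+		//(The HEAD result below is spliced into its numbered slot in segments
+		//so ordering stays correct even when the listing was stale.)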
+ segment, _, err := c.Object(segmentContainer, segmentName) + switch err { + case nil: + //found new segment -> add it in the correct position and keep + //going, more might be missing + if segmentNumber <= len(segments) { + segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...) + segments[segmentNumber-1] = segment + } else { + segments = append(segments, segment) + } + continue + case ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. + return segments, nil + default: + return nil, err //unexpected error + } + } +} diff --git a/vendor/github.com/ncw/swift/doc.go b/vendor/github.com/ncw/swift/doc.go new file mode 100644 index 000000000000..44efde7bf824 --- /dev/null +++ b/vendor/github.com/ncw/swift/doc.go @@ -0,0 +1,19 @@ +/* +Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files + +Standard Usage + +Most of the work is done through the Container*() and Object*() methods. + +All methods are safe to use concurrently in multiple go routines. + +Object Versioning + +As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in it's place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system. + +Rackspace Sub Module + +This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects. + +*/ +package swift diff --git a/vendor/github.com/ncw/swift/go.mod b/vendor/github.com/ncw/swift/go.mod new file mode 100644 index 000000000000..29f6ee2cb82a --- /dev/null +++ b/vendor/github.com/ncw/swift/go.mod @@ -0,0 +1 @@ +module github.com/ncw/swift diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go new file mode 100644 index 000000000000..bec640b00e0e --- /dev/null +++ b/vendor/github.com/ncw/swift/largeobjects.go @@ -0,0 +1,448 @@ +package swift + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + gopath "path" + "strconv" + "strings" + "time" +) + +// NotLargeObject is returned if an operation is performed on an object which isn't large. 
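+// Callers compare against it directly, e.g. err == NotLargeObject.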
+var NotLargeObject = errors.New("Not a large object") + +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + +// largeObjectCreateFile represents an open static or dynamic large object +type largeObjectCreateFile struct { + conn *Connection + container string + objectName string + currentLength int64 + filePos int64 + chunkSize int64 + segmentContainer string + prefix string + contentType string + checkHash bool + segments []Object + headers Headers + minChunkSize int64 +} + +func swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func getSegment(segmentPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentPath, partNumber) +} + +func parseFullPath(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func (headers Headers) IsLargeObjectDLO() bool { + _, isDLO := headers["X-Object-Manifest"] + return isDLO +} + +func (headers Headers) IsLargeObjectSLO() bool { + _, isSLO := headers["X-Static-Large-Object"] + return isSLO +} + +func (headers Headers) IsLargeObject() bool { + return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO() +} + +func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) { + if manifest, isDLO := headers["X-Object-Manifest"]; isDLO { + segmentContainer, segmentPath := parseFullPath(manifest) + segments, err := c.getAllDLOSegments(segmentContainer, segmentPath) + return segmentContainer, segments, err + } + if headers.IsLargeObjectSLO() { + return c.getAllSLOSegments(container, path) + } + return "", nil, NotLargeObject +} + +// LargeObjectOpts describes how a large object should be created +type LargeObjectOpts struct { + Container string // Name of container to place object + ObjectName string // Name of object + Flags int // Creation flags + CheckHash bool // If set Check the hash + Hash string // If set use this hash to check + ContentType string // Content-Type of the object + Headers Headers // Additional headers to upload the object with + ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set + MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info + SegmentContainer string // Name of the container to place segments + SegmentPrefix string // Prefix to use for the segments + NoBuffer bool // Prevents using a bufio.Writer to write segments +} + +type LargeObjectFile interface { + io.Writer + io.Seeker + io.Closer + Size() int64 + Flush() error +} + +// largeObjectCreate creates a large object at opts.Container, opts.ObjectName. 
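+// If the object already exists, its segments are reused or removed according
+// to opts.Flags.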
+// +// opts.Flags can have the following bits set +// os.TRUNC - remove the contents of the large object if it exists +// os.APPEND - write at the end of the large object +func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) { + var ( + segmentPath string + segmentContainer string + segments []Object + currentLength int64 + err error + ) + + if opts.SegmentPrefix != "" { + segmentPath = opts.SegmentPrefix + } else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil { + return nil, err + } + + if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil { + if opts.Flags&os.O_TRUNC != 0 { + c.LargeObjectDelete(opts.Container, opts.ObjectName) + } else { + currentLength = info.Bytes + if headers.IsLargeObject() { + segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers) + if err != nil { + return nil, err + } + if len(segments) > 0 { + segmentPath = gopath.Dir(segments[0].Name) + } + } else { + if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil { + return nil, err + } + segments = append(segments, info) + } + } + } else if err != ObjectNotFound { + return nil, err + } + + // segmentContainer is not empty when the manifest already existed + if segmentContainer == "" { + if opts.SegmentContainer != "" { + segmentContainer = opts.SegmentContainer + } else { + segmentContainer = opts.Container + "_segments" + } + } + + file := &largeObjectCreateFile{ + conn: c, + checkHash: opts.CheckHash, + container: opts.Container, + objectName: opts.ObjectName, + chunkSize: opts.ChunkSize, + minChunkSize: opts.MinChunkSize, + headers: opts.Headers, + segmentContainer: segmentContainer, + prefix: segmentPath, + segments: segments, + currentLength: currentLength, + } + + if file.chunkSize == 0 { + file.chunkSize = 10 * 1024 * 1024 + } + + if file.minChunkSize > file.chunkSize { + file.chunkSize = file.minChunkSize + } + + if opts.Flags&os.O_APPEND != 0 { + file.filePos = currentLength + } + + return file, nil +} + +// LargeObjectDelete deletes the large object named by container, path +func (c *Connection) LargeObjectDelete(container string, objectName string) error { + _, headers, err := c.Object(container, objectName) + if err != nil { + return err + } + + var objects [][]string + if headers.IsLargeObject() { + segmentContainer, segments, err := c.getAllSegments(container, objectName, headers) + if err != nil { + return err + } + for _, obj := range segments { + objects = append(objects, []string{segmentContainer, obj.Name}) + } + } + objects = append(objects, []string{container, objectName}) + + info, err := c.cachedQueryInfo() + if err == nil && info.SupportsBulkDelete() && len(objects) > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj[0] + "/" + obj[1] + } + _, err = c.doBulkDelete(filenames) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. + if err != nil && err != Forbidden && err != ObjectNotFound { + return err + } + } else { + for _, obj := range objects { + if err := c.ObjectDelete(obj[0], obj[1]); err != nil { + return err + } + } + } + + return nil +} + +// LargeObjectGetSegments returns all the segments that compose an object +// If the object is a Dynamic Large Object (DLO), it just returns the objects +// that have the prefix as indicated by the manifest. 
+// If the object is a Static Large Object (SLO), it retrieves the JSON content +// of the manifest and return all the segments of it. +func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) { + _, headers, err := c.Object(container, path) + if err != nil { + return "", nil, err + } + + return c.getAllSegments(container, path, headers) +} + +// Seek sets the offset for the next write operation +func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + file.filePos = offset + case 1: + file.filePos += offset + case 2: + file.filePos = file.currentLength + offset + default: + return -1, fmt.Errorf("invalid value for whence") + } + if file.filePos < 0 { + return -1, fmt.Errorf("negative offset") + } + return file.filePos, nil +} + +func (file *largeObjectCreateFile) Size() int64 { + return file.currentLength +} + +func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) { + endTimer := time.NewTimer(readAfterWriteTimeout) + defer endTimer.Stop() + waitingTime := readAfterWriteWait + for { + var headers Headers + var sz int64 + if headers, sz, err = fn(); err == nil { + if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz { + return + } + } else { + return + } + waitTimer := time.NewTimer(waitingTime) + select { + case <-endTimer.C: + waitTimer.Stop() + err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz) + return + case <-waitTimer.C: + waitingTime *= 2 + } + } +} + +func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) { + err = withLORetry(expectedSize, func() (Headers, int64, error) { + var info Object + var headers Headers + info, headers, err = c.objectBase(container, objectName) + if err != nil { + return headers, 0, err + } + return headers, info.Bytes, nil + }) + return +} + +// Write satisfies the io.Writer interface +func (file *largeObjectCreateFile) Write(buf []byte) (int, error) { + var sz int64 + var relativeFilePos int + writeSegmentIdx := 0 + for i, obj := range file.segments { + if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) { + relativeFilePos = int(file.filePos - sz) + break + } + writeSegmentIdx++ + sz += obj.Bytes + } + sizeToWrite := len(buf) + for offset := 0; offset < sizeToWrite; { + newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos) + if err != nil { + return 0, err + } + if writeSegmentIdx < len(file.segments) { + file.segments[writeSegmentIdx] = *newSegment + } else { + file.segments = append(file.segments, *newSegment) + } + offset += n + writeSegmentIdx++ + relativeFilePos = 0 + } + file.filePos += int64(sizeToWrite) + file.currentLength = 0 + for _, obj := range file.segments { + file.currentLength += obj.Bytes + } + return sizeToWrite, nil +} + +func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) { + var ( + readers []io.Reader + existingSegment *Object + segmentSize int + ) + segmentName := getSegment(file.prefix, writeSegmentIdx+1) + sizeToRead := int(file.chunkSize) + if writeSegmentIdx < len(file.segments) { + existingSegment = &file.segments[writeSegmentIdx] + if writeSegmentIdx != len(file.segments)-1 { + sizeToRead = int(existingSegment.Bytes) + } + if relativeFilePos > 0 { + headers := make(Headers) + headers["Range"] = "bytes=0-" 
+ strconv.FormatInt(int64(relativeFilePos-1), 10) + existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) + if err != nil { + return nil, 0, err + } + defer existingSegmentReader.Close() + sizeToRead -= relativeFilePos + segmentSize += relativeFilePos + readers = []io.Reader{existingSegmentReader} + } + } + if sizeToRead > len(buf) { + sizeToRead = len(buf) + } + segmentSize += sizeToRead + readers = append(readers, bytes.NewReader(buf[:sizeToRead])) + if existingSegment != nil && segmentSize < int(existingSegment.Bytes) { + headers := make(Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-" + tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) + if err != nil { + return nil, 0, err + } + defer tailSegmentReader.Close() + segmentSize = int(existingSegment.Bytes) + readers = append(readers, tailSegmentReader) + } + segmentReader := io.MultiReader(readers...) + headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil) + if err != nil { + return nil, 0, err + } + return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil +} + +func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile { + if !opts.NoBuffer { + return &bufferedLargeObjectFile{ + LargeObjectFile: lo, + bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)), + } + } + return lo +} + +type bufferedLargeObjectFile struct { + LargeObjectFile + bw *bufio.Writer +} + +func (blo *bufferedLargeObjectFile) Close() error { + err := blo.bw.Flush() + if err != nil { + return err + } + return blo.LargeObjectFile.Close() +} + +func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) { + return blo.bw.Write(p) +} + +func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) { + err := blo.bw.Flush() + if err != nil { + return 0, err + } + return blo.LargeObjectFile.Seek(offset, whence) +} + +func (blo *bufferedLargeObjectFile) Size() int64 { + return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered()) +} + +func (blo *bufferedLargeObjectFile) Flush() error { + err := blo.bw.Flush() + if err != nil { + return err + } + return blo.LargeObjectFile.Flush() +} diff --git a/vendor/github.com/ncw/swift/meta.go b/vendor/github.com/ncw/swift/meta.go new file mode 100644 index 000000000000..7e149e1390ef --- /dev/null +++ b/vendor/github.com/ncw/swift/meta.go @@ -0,0 +1,174 @@ +// Metadata manipulation in and out of Headers + +package swift + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +// Metadata stores account, container or object metadata. +type Metadata map[string]string + +// Metadata gets the Metadata starting with the metaPrefix out of the Headers. +// +// The keys in the Metadata will be converted to lower case +func (h Headers) Metadata(metaPrefix string) Metadata { + m := Metadata{} + metaPrefix = http.CanonicalHeaderKey(metaPrefix) + for key, value := range h { + if strings.HasPrefix(key, metaPrefix) { + metaKey := strings.ToLower(key[len(metaPrefix):]) + m[metaKey] = value + } + } + return m +} + +// AccountMetadata converts Headers from account to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) AccountMetadata() Metadata { + return h.Metadata("X-Account-Meta-") +} + +// ContainerMetadata converts Headers from container to a Metadata. 
+// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ContainerMetadata() Metadata { + return h.Metadata("X-Container-Meta-") +} + +// ObjectMetadata converts Headers from object to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ObjectMetadata() Metadata { + return h.Metadata("X-Object-Meta-") +} + +// Headers convert the Metadata starting with the metaPrefix into a +// Headers. +// +// The keys in the Metadata will be converted from lower case to http +// Canonical (see http.CanonicalHeaderKey). +func (m Metadata) Headers(metaPrefix string) Headers { + h := Headers{} + for key, value := range m { + key = http.CanonicalHeaderKey(metaPrefix + key) + h[key] = value + } + return h +} + +// AccountHeaders converts the Metadata for the account. +func (m Metadata) AccountHeaders() Headers { + return m.Headers("X-Account-Meta-") +} + +// ContainerHeaders converts the Metadata for the container. +func (m Metadata) ContainerHeaders() Headers { + return m.Headers("X-Container-Meta-") +} + +// ObjectHeaders converts the Metadata for the object. +func (m Metadata) ObjectHeaders() Headers { + return m.Headers("X-Object-Meta-") +} + +// Turns a number of ns into a floating point string in seconds +// +// Trims trailing zeros and guaranteed to be perfectly accurate +func nsToFloatString(ns int64) string { + if ns < 0 { + return "-" + nsToFloatString(-ns) + } + result := fmt.Sprintf("%010d", ns) + split := len(result) - 9 + result, decimals := result[:split], result[split:] + decimals = strings.TrimRight(decimals, "0") + if decimals != "" { + result += "." + result += decimals + } + return result +} + +// Turns a floating point string in seconds into a ns integer +// +// Guaranteed to be perfectly accurate +func floatStringToNs(s string) (int64, error) { + const zeros = "000000000" + if point := strings.IndexRune(s, '.'); point >= 0 { + tail := s[point+1:] + if fill := 9 - len(tail); fill < 0 { + tail = tail[:9] + } else { + tail += zeros[:fill] + } + s = s[:point] + tail + } else if len(s) > 0 { // Make sure empty string produces an error + s += zeros + } + return strconv.ParseInt(s, 10, 64) +} + +// FloatStringToTime converts a floating point number string to a time.Time +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number should be in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z" +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. +// +// If an error is returned then time will be returned as the zero time. +func FloatStringToTime(s string) (t time.Time, err error) { + ns, err := floatStringToNs(s) + if err != nil { + return + } + t = time.Unix(0, ns) + return +} + +// TimeToFloatString converts a time.Time object to a floating point string +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number is in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped +// from the output. +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. 
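+//
+// A short worked example (editor's sketch - the values are the ones used
+// in the documentation above):
+//
+//	t, err := swift.FloatStringToTime("1354040105.123456789")
+//	// t is 2012-11-27T18:15:05.123456789Z, err is nil
+//	s := swift.TimeToFloatString(t)
+//	// s round-trips exactly back to "1354040105.123456789"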
+func TimeToFloatString(t time.Time) string {
+	return nsToFloatString(t.UnixNano())
+}
+
+// GetModTime reads a modification time (mtime) from a Metadata object
+//
+// This is a de facto standard (used in the official python-swiftclient
+// amongst others) for storing the modification time (as read using
+// os.Stat) for an object. It is stored using the key 'mtime', which
+// for example when written to an object will be 'X-Object-Meta-Mtime'.
+//
+// If an error is returned then time will be returned as the zero time.
+func (m Metadata) GetModTime() (t time.Time, err error) {
+	return FloatStringToTime(m["mtime"])
+}
+
+// SetModTime writes a modification time (mtime) to a Metadata object
+//
+// This is a de facto standard (used in the official python-swiftclient
+// amongst others) for storing the modification time (as read using
+// os.Stat) for an object. It is stored using the key 'mtime', which
+// for example when written to an object will be 'X-Object-Meta-Mtime'.
+func (m Metadata) SetModTime(t time.Time) {
+	m["mtime"] = TimeToFloatString(t)
+}
diff --git a/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/ncw/swift/notes.txt
new file mode 100644
index 000000000000..f738552cd8ad
--- /dev/null
+++ b/vendor/github.com/ncw/swift/notes.txt
@@ -0,0 +1,55 @@
+Notes on Go Swift
+=================
+
+Make a builder style interface like the Google Go APIs? Advantages
+are that it is easy to add named methods to the service object to do
+specific things. Slightly less efficient. Not sure about how to
+return extra stuff though - in an object?
+
+Make a container struct so these could be methods on it?
+
+Make noResponse check for 204?
+
+Make storage public so it can be extended easily?
+
+Rename to go-swift to match user agent string?
+
+Reconnect on auth error - 401 when token expires isn't tested
+
+Make more api compatible with python cloudfiles?
+
+Retry operations on timeout / network errors?
+- also 408 error
+- GET requests only?
+
+Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock
+
+Add extra headers field to Connection (for via etc)
+
+Make errors use an error hierarchy so they can be caught with a type assertion
+
+    Error(...)
+    ObjectCorrupted{ Error }
+
+Make a Debug flag in connection for logging stuff
+
+Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
+
+Object range
+
+Object create, update with X-Delete-At or X-Delete-After
+
+Large object support
+- check uploads are less than 5GB in normal mode?
+
+Access control CORS?
+
+Swift client retries and backs off for all types of errors
+
+Implement net error interface?
+
+type Error interface {
+	error
+	Timeout() bool   // Is the error a timeout?
+	Temporary() bool // Is the error temporary?
+} diff --git a/vendor/github.com/ncw/swift/slo.go b/vendor/github.com/ncw/swift/slo.go new file mode 100644 index 000000000000..6a10ddfc0560 --- /dev/null +++ b/vendor/github.com/ncw/swift/slo.go @@ -0,0 +1,171 @@ +package swift + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" +) + +// StaticLargeObjectCreateFile represents an open static large object +type StaticLargeObjectCreateFile struct { + largeObjectCreateFile +} + +var SLONotSupported = errors.New("SLO not supported") + +type swiftSegment struct { + Path string `json:"path,omitempty"` + Etag string `json:"etag,omitempty"` + Size int64 `json:"size_bytes,omitempty"` + // When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes` + // but when querying the JSON content of a manifest with the `multipart-manifest=get` + // parameter, Swift names those attributes `name`, `hash` and `bytes`. + // We use all the different attributes names in this structure to be able to use + // the same structure for both uploading and retrieving. + Name string `json:"name,omitempty"` + Hash string `json:"hash,omitempty"` + Bytes int64 `json:"bytes,omitempty"` + ContentType string `json:"content_type,omitempty"` + LastModified string `json:"last_modified,omitempty"` +} + +// StaticLargeObjectCreateFile creates a static large object returning +// an object which satisfies io.Writer, io.Seeker, io.Closer and +// io.ReaderFrom. The flags are as passed to the largeObjectCreate +// method. +func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { + info, err := c.cachedQueryInfo() + if err != nil || !info.SupportsSLO() { + return nil, SLONotSupported + } + realMinChunkSize := info.SLOMinSegmentSize() + if realMinChunkSize > opts.MinChunkSize { + opts.MinChunkSize = realMinChunkSize + } + lo, err := c.largeObjectCreate(opts) + if err != nil { + return nil, err + } + return withBuffer(opts, &StaticLargeObjectCreateFile{ + largeObjectCreateFile: *lo, + }), nil +} + +// StaticLargeObjectCreate creates or truncates an existing static +// large object returning a writeable object. This sets opts.Flags to +// an appropriate value before calling StaticLargeObjectCreateFile +func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { + opts.Flags = os.O_TRUNC | os.O_CREATE + return c.StaticLargeObjectCreateFile(opts) +} + +// StaticLargeObjectDelete deletes a static large object and all of its segments. 
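+//
+// A minimal lifecycle sketch (editor's addition, not upstream godoc; the
+// container and object names are illustrative, error handling is elided,
+// and the Container/ObjectName fields are assumed to be the LargeObjectOpts
+// fields defined in largeobjects.go):
+//
+//	out, err := c.StaticLargeObjectCreate(&swift.LargeObjectOpts{
+//		Container:  "mycontainer",
+//		ObjectName: "big.bin",
+//	})
+//	out.Write(data) // data is written out in segments
+//	out.Close()     // uploads the SLO manifest - always check this error
+//	err = c.StaticLargeObjectDelete("mycontainer", "big.bin")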
+func (c *Connection) StaticLargeObjectDelete(container string, path string) error { + info, err := c.cachedQueryInfo() + if err != nil || !info.SupportsSLO() { + return SLONotSupported + } + return c.LargeObjectDelete(container, path) +} + +// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName +func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error { + swiftInfo, err := c.cachedQueryInfo() + if err != nil || !swiftInfo.SupportsSLO() { + return SLONotSupported + } + info, headers, err := c.Object(srcContainer, srcObjectName) + if err != nil { + return err + } + + container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers) + if err != nil { + return err + } + + //copy only metadata during move (other headers might not be safe for copying) + headers = headers.ObjectMetadata().ObjectHeaders() + + if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil { + return err + } + + if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil { + return err + } + + return nil +} + +// createSLOManifest creates a static large object manifest +func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error { + sloSegments := make([]swiftSegment, len(segments)) + for i, segment := range segments { + sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name) + sloSegments[i].Etag = segment.Hash + sloSegments[i].Size = segment.Bytes + } + + content, err := json.Marshal(sloSegments) + if err != nil { + return err + } + + values := url.Values{} + values.Set("multipart-manifest", "put") + if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil { + return err + } + + return nil +} + +func (file *StaticLargeObjectCreateFile) Close() error { + return file.Flush() +} + +func (file *StaticLargeObjectCreateFile) Flush() error { + if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil { + return err + } + return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) +} + +func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) { + var ( + segmentList []swiftSegment + segments []Object + segPath string + segmentContainer string + ) + + values := url.Values{} + values.Set("multipart-manifest", "get") + + file, _, err := c.objectOpen(container, path, true, nil, values) + if err != nil { + return "", nil, err + } + + content, err := ioutil.ReadAll(file) + if err != nil { + return "", nil, err + } + + json.Unmarshal(content, &segmentList) + for _, segment := range segmentList { + segmentContainer, segPath = parseFullPath(segment.Name[1:]) + segments = append(segments, Object{ + Name: segPath, + Bytes: segment.Bytes, + Hash: segment.Hash, + }) + } + + return segmentContainer, segments, nil +} diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go new file mode 100644 index 000000000000..217647b9a4d1 --- /dev/null +++ b/vendor/github.com/ncw/swift/swift.go @@ -0,0 +1,2270 @@ +package swift + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "io/ioutil" + "mime" 
+ "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DefaultUserAgent = "goswift/1.0" // Default user agent + DefaultRetries = 3 // Default number of retries on token expiry + TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC + UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). + UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). + UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). + allContainersLimit = 10000 // Number of containers to fetch at once + allObjectsLimit = 10000 // Number objects to fetch at once + allObjectsChanLimit = 1000 // ...when fetching to a channel +) + +// ObjectType is the type of the swift object, regular, static large, +// or dynamic large. +type ObjectType int + +// Values that ObjectType can take +const ( + RegularObjectType ObjectType = iota + StaticLargeObjectType + DynamicLargeObjectType +) + +// Connection holds the details of the connection to the swift server. +// +// You need to provide UserName, ApiKey and AuthUrl when you create a +// connection then call Authenticate on it. +// +// The auth version in use will be detected from the AuthURL - you can +// override this with the AuthVersion parameter. +// +// If using v2 auth you can also set Region in the Connection +// structure. If you don't set Region you will get the default region +// which may not be what you want. +// +// For reference some common AuthUrls looks like this: +// +// Rackspace US https://auth.api.rackspacecloud.com/v1.0 +// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 +// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 +// Memset Memstore UK https://auth.storage.memset.com/v1.0 +// Memstore v2 https://auth.storage.memset.com/v2.0 +// +// When using Google Appengine you must provide the Connection with an +// appengine-specific Transport: +// +// import ( +// "appengine/urlfetch" +// "fmt" +// "github.com/ncw/swift" +// ) +// +// func handler(w http.ResponseWriter, r *http.Request) { +// ctx := appengine.NewContext(r) +// tr := urlfetch.Transport{Context: ctx} +// c := swift.Connection{ +// UserName: "user", +// ApiKey: "key", +// AuthUrl: "auth_url", +// Transport: tr, +// } +// _ := c.Authenticate() +// containers, _ := c.ContainerNames(nil) +// fmt.Fprintf(w, "containers: %q", containers) +// } +// +// If you don't supply a Transport, one is made which relies on +// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). +// This means that the connection will respect the HTTP proxy specified by the +// environment variables $HTTP_PROXY and $NO_PROXY. 
+type Connection struct { + // Parameters - fill these in before calling Authenticate + // They are all optional except UserName, ApiKey and AuthUrl + Domain string // User's domain name + DomainId string // User's domain Id + UserName string // UserName for api + UserId string // User Id + ApiKey string // Key for api access + ApplicationCredentialId string // Application Credential ID + ApplicationCredentialName string // Application Credential Name + ApplicationCredentialSecret string // Application Credential Secret + AuthUrl string // Auth URL + Retries int // Retries on error (default is 3) + UserAgent string // Http User agent (default goswift/1.0) + ConnectTimeout time.Duration // Connect channel timeout (default 10s) + Timeout time.Duration // Data channel timeout (default 60s) + Region string // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only) + AuthVersion int // Set to 1, 2 or 3 or leave at 0 for autodetect + Internal bool // Set this to true to use the the internal / service network + Tenant string // Name of the tenant (v2,v3 auth only) + TenantId string // Id of the tenant (v2,v3 auth only) + EndpointType EndpointType // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set) + TenantDomain string // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain + TenantDomainId string // Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain + TrustId string // Id of the trust (v3 auth only) + Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine) + // These are filled in after Authenticate is called as are the defaults for above + StorageUrl string + AuthToken string + Expires time.Time // time the token expires, may be Zero if unknown + client *http.Client + Auth Authenticator `json:"-" xml:"-"` // the current authenticator + authLock *sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth + // swiftInfo is filled after QueryInfo is called + swiftInfo SwiftInfo +} + +// setFromEnv reads the value that param points to (it must be a +// pointer), if it isn't the zero value then it reads the environment +// variable name passed in, parses it according to the type and writes +// it to the pointer. +func setFromEnv(param interface{}, name string) (err error) { + val := os.Getenv(name) + if val == "" { + return + } + switch result := param.(type) { + case *string: + if *result == "" { + *result = val + } + case *int: + if *result == 0 { + *result, err = strconv.Atoi(val) + } + case *bool: + if *result == false { + *result, err = strconv.ParseBool(val) + } + case *time.Duration: + if *result == 0 { + *result, err = time.ParseDuration(val) + } + case *EndpointType: + if *result == EndpointType("") { + *result = EndpointType(val) + } + default: + return newErrorf(0, "can't set var of type %T", param) + } + return err +} + +// ApplyEnvironment reads environment variables and applies them to +// the Connection structure. It won't overwrite any parameters which +// are already set in the Connection struct. +// +// To make a new Connection object entirely from the environment you +// would do: +// +// c := new(Connection) +// err := c.ApplyEnvironment() +// if err != nil { log.Fatal(err) } +// +// The naming of these variables follows the official Openstack naming +// scheme so it should be compatible with OpenStack rc files. 
+//
+// For v1 authentication (obsolete)
+//     ST_AUTH - Auth URL
+//     ST_USER - UserName for api
+//     ST_KEY - Key for api access
+//
+// For v2 authentication
+//     OS_AUTH_URL - Auth URL
+//     OS_USERNAME - UserName for api
+//     OS_PASSWORD - Key for api access
+//     OS_TENANT_NAME - Name of the tenant
+//     OS_TENANT_ID - Id of the tenant
+//     OS_REGION_NAME - Region to use - default is use first region
+//
+// For v3 authentication
+//     OS_AUTH_URL - Auth URL
+//     OS_USERNAME - UserName for api
+//     OS_USER_ID - User Id
+//     OS_PASSWORD - Key for api access
+//     OS_APPLICATION_CREDENTIAL_ID - Application Credential ID
+//     OS_APPLICATION_CREDENTIAL_NAME - Application Credential Name
+//     OS_APPLICATION_CREDENTIAL_SECRET - Application Credential Secret
+//     OS_USER_DOMAIN_NAME - User's domain name
+//     OS_USER_DOMAIN_ID - User's domain Id
+//     OS_PROJECT_NAME - Name of the project
+//     OS_PROJECT_DOMAIN_NAME - Name of the tenant's domain, only needed if it differs from the user domain
+//     OS_PROJECT_DOMAIN_ID - Id of the tenant's domain, only needed if it differs from the user domain
+//     OS_TRUST_ID - Id of the trust
+//     OS_REGION_NAME - Region to use - default is use first region
+//
+// Other
+//     OS_ENDPOINT_TYPE - Endpoint type public, internal or admin
+//     ST_AUTH_VERSION - Choose auth version - 1, 2 or 3 or leave at 0 for autodetect
+//
+// For manual authentication
+//     OS_STORAGE_URL - storage URL from alternate authentication
+//     OS_AUTH_TOKEN - Auth Token from alternate authentication
+//
+// Library specific
+//     GOSWIFT_RETRIES - Retries on error (default is 3)
+//     GOSWIFT_USER_AGENT - HTTP User agent (default goswift/1.0)
+//     GOSWIFT_CONNECT_TIMEOUT - Connect channel timeout with unit, eg "10s", "100ms" (default "10s")
+//     GOSWIFT_TIMEOUT - Data channel timeout with unit, eg "10s", "100ms" (default "60s")
+//     GOSWIFT_INTERNAL - Set this to "true" to use the internal network (obsolete - use OS_ENDPOINT_TYPE)
+func (c *Connection) ApplyEnvironment() (err error) {
+	for _, item := range []struct {
+		result interface{}
+		name   string
+	}{
+		// Environment variables - keep in same order as Connection
+		{&c.Domain, "OS_USER_DOMAIN_NAME"},
+		{&c.DomainId, "OS_USER_DOMAIN_ID"},
+		{&c.UserName, "OS_USERNAME"},
+		{&c.UserId, "OS_USER_ID"},
+		{&c.ApiKey, "OS_PASSWORD"},
+		{&c.ApplicationCredentialId, "OS_APPLICATION_CREDENTIAL_ID"},
+		{&c.ApplicationCredentialName, "OS_APPLICATION_CREDENTIAL_NAME"},
+		{&c.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET"},
+		{&c.AuthUrl, "OS_AUTH_URL"},
+		{&c.Retries, "GOSWIFT_RETRIES"},
+		{&c.UserAgent, "GOSWIFT_USER_AGENT"},
+		{&c.ConnectTimeout, "GOSWIFT_CONNECT_TIMEOUT"},
+		{&c.Timeout, "GOSWIFT_TIMEOUT"},
+		{&c.Region, "OS_REGION_NAME"},
+		{&c.AuthVersion, "ST_AUTH_VERSION"},
+		{&c.Internal, "GOSWIFT_INTERNAL"},
+		{&c.Tenant, "OS_TENANT_NAME"},  // v2
+		{&c.Tenant, "OS_PROJECT_NAME"}, // v3
+		{&c.TenantId, "OS_TENANT_ID"},
+		{&c.EndpointType, "OS_ENDPOINT_TYPE"},
+		{&c.TenantDomain, "OS_PROJECT_DOMAIN_NAME"},
+		{&c.TenantDomainId, "OS_PROJECT_DOMAIN_ID"},
+		{&c.TrustId, "OS_TRUST_ID"},
+		{&c.StorageUrl, "OS_STORAGE_URL"},
+		{&c.AuthToken, "OS_AUTH_TOKEN"},
+		// v1 auth alternatives
+		{&c.ApiKey, "ST_KEY"},
+		{&c.UserName, "ST_USER"},
+		{&c.AuthUrl, "ST_AUTH"},
+	} {
+		err = setFromEnv(item.result, item.name)
+		if err != nil {
+			return newErrorf(0, "failed to read env var %q: %v", item.name, err)
+		}
+	}
+	return nil
+}
+
+// Error - all errors generated by this package are of this type. Other errors
+// may be passed on from library functions though.
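+//
+// Errors returned by this package can be compared directly against the
+// sentinel values defined below, for example (editor's sketch; c is an
+// authenticated Connection and the names are illustrative):
+//
+//	_, _, err := c.Object("mycontainer", "missing.txt")
+//	if err == swift.ObjectNotFound {
+//		// the object does not exist
+//	}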
+type Error struct { + StatusCode int // HTTP status code if relevant or 0 if not + Text string +} + +// Error satisfy the error interface. +func (e *Error) Error() string { + return e.Text +} + +// newError make a new error from a string. +func newError(StatusCode int, Text string) *Error { + return &Error{ + StatusCode: StatusCode, + Text: Text, + } +} + +// newErrorf makes a new error from sprintf parameters. +func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error { + return newError(StatusCode, fmt.Sprintf(Text, Parameters...)) +} + +// errorMap defines http error codes to error mappings. +type errorMap map[int]error + +var ( + // Specific Errors you might want to check for equality + NotModified = newError(304, "Not Modified") + BadRequest = newError(400, "Bad Request") + AuthorizationFailed = newError(401, "Authorization Failed") + ContainerNotFound = newError(404, "Container Not Found") + ContainerNotEmpty = newError(409, "Container Not Empty") + ObjectNotFound = newError(404, "Object Not Found") + ObjectCorrupted = newError(422, "Object Corrupted") + TimeoutError = newError(408, "Timeout when reading or writing data") + Forbidden = newError(403, "Operation forbidden") + TooLargeObject = newError(413, "Too Large Object") + RateLimit = newError(498, "Rate Limit") + TooManyRequests = newError(429, "TooManyRequests") + + // Mappings for authentication errors + authErrorMap = errorMap{ + 400: BadRequest, + 401: AuthorizationFailed, + 403: Forbidden, + } + + // Mappings for container errors + ContainerErrorMap = errorMap{ + 400: BadRequest, + 403: Forbidden, + 404: ContainerNotFound, + 409: ContainerNotEmpty, + 498: RateLimit, + } + + // Mappings for object errors + objectErrorMap = errorMap{ + 304: NotModified, + 400: BadRequest, + 403: Forbidden, + 404: ObjectNotFound, + 413: TooLargeObject, + 422: ObjectCorrupted, + 429: TooManyRequests, + 498: RateLimit, + } +) + +// checkClose is used to check the return from Close in a defer +// statement. +func checkClose(c io.Closer, err *error) { + cerr := c.Close() + if *err == nil { + *err = cerr + } +} + +// drainAndClose discards all data from rd and closes it. +// If an error occurs during Read, it is discarded. +func drainAndClose(rd io.ReadCloser, err *error) { + if rd == nil { + return + } + + _, _ = io.Copy(ioutil.Discard, rd) + cerr := rd.Close() + if err != nil && *err == nil { + *err = cerr + } +} + +// parseHeaders checks a response for errors and translates into +// standard errors if necessary. If an error is returned, resp.Body +// has been drained and closed. +func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error { + if errorMap != nil { + if err, ok := errorMap[resp.StatusCode]; ok { + drainAndClose(resp.Body, nil) + return err + } + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + drainAndClose(resp.Body, nil) + return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status) + } + return nil +} + +// readHeaders returns a Headers object from the http.Response. +// +// If it receives multiple values for a key (which should never +// happen) it will use the first one +func readHeaders(resp *http.Response) Headers { + headers := Headers{} + for key, values := range resp.Header { + headers[key] = values[0] + } + return headers +} + +// Headers stores HTTP headers (can only have one of each header like Swift). 
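+//
+// Example (editor's sketch, not upstream godoc) - pulling object metadata
+// out of the response headers with the helpers from meta.go:
+//
+//	_, headers, err := c.Object("mycontainer", "obj.txt")
+//	if err == nil {
+//		mtime, _ := headers.ObjectMetadata().GetModTime()
+//		fmt.Println(mtime)
+//	}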
+type Headers map[string]string + +// Does an http request using the running timer passed in +func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { + // Do the request in the background so we can check the timeout + type result struct { + resp *http.Response + err error + } + done := make(chan result, 1) + go func() { + resp, err := c.client.Do(req) + done <- result{resp, err} + }() + // Wait for the read or the timeout + select { + case r := <-done: + return r.resp, r.err + case <-timer.C: + // Kill the connection on timeout so we don't leak sockets or goroutines + cancelRequest(c.Transport, req) + return nil, TimeoutError + } + panic("unreachable") // For Go 1.0 +} + +// Set defaults for any unset values +// +// Call with authLock held +func (c *Connection) setDefaults() { + if c.UserAgent == "" { + c.UserAgent = DefaultUserAgent + } + if c.Retries == 0 { + c.Retries = DefaultRetries + } + if c.ConnectTimeout == 0 { + c.ConnectTimeout = 10 * time.Second + } + if c.Timeout == 0 { + c.Timeout = 60 * time.Second + } + if c.Transport == nil { + t := &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + // Half of linux's default open files limit (1024). + MaxIdleConnsPerHost: 512, + } + SetExpectContinueTimeout(t, 5*time.Second) + c.Transport = t + } + if c.client == nil { + c.client = &http.Client{ + // CheckRedirect: redirectPolicyFunc, + Transport: c.Transport, + } + } +} + +// Authenticate connects to the Swift server. +// +// If you don't call it before calling one of the connection methods +// then it will be called for you on the first access. +func (c *Connection) Authenticate() (err error) { + if c.authLock == nil { + c.authLock = &sync.Mutex{} + } + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticate() +} + +// Internal implementation of Authenticate +// +// Call with authLock held +func (c *Connection) authenticate() (err error) { + c.setDefaults() + + // Flush the keepalives connection - if we are + // re-authenticating then stuff has gone wrong + flushKeepaliveConnections(c.Transport) + + if c.Auth == nil { + c.Auth, err = newAuth(c) + if err != nil { + return + } + } + + retries := 1 +again: + var req *http.Request + req, err = c.Auth.Request(c) + if err != nil { + return + } + if req != nil { + timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() + var resp *http.Response + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + return + } + defer func() { + drainAndClose(resp.Body, &err) + // Flush the auth connection - we don't want to keep + // it open if keepalives were enabled + flushKeepaliveConnections(c.Transport) + }() + if err = c.parseHeaders(resp, authErrorMap); err != nil { + // Try again for a limited number of times on + // AuthorizationFailed or BadRequest. 
This allows us + // to try some alternate forms of the request + if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { + retries-- + goto again + } + return + } + err = c.Auth.Response(resp) + if err != nil { + return + } + } + if customAuth, isCustom := c.Auth.(CustomEndpointAuthenticator); isCustom && c.EndpointType != "" { + c.StorageUrl = customAuth.StorageUrlForEndpoint(c.EndpointType) + } else { + c.StorageUrl = c.Auth.StorageUrl(c.Internal) + } + c.AuthToken = c.Auth.Token() + if do, ok := c.Auth.(Expireser); ok { + c.Expires = do.Expires() + } else { + c.Expires = time.Time{} + } + + if !c.authenticated() { + err = newError(0, "Response didn't have storage url and auth token") + return + } + return +} + +// Get an authToken and url +// +// The Url may be updated if it needed to authenticate using the OnReAuth function +func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { + c.authLock.Lock() + defer c.authLock.Unlock() + targetUrlOut = targetUrlIn + if !c.authenticated() { + err = c.authenticate() + if err != nil { + return + } + if OnReAuth != nil { + targetUrlOut, err = OnReAuth() + if err != nil { + return + } + } + } + authToken = c.AuthToken + return +} + +// flushKeepaliveConnections is called to flush pending requests after an error. +func flushKeepaliveConnections(transport http.RoundTripper) { + if tr, ok := transport.(interface { + CloseIdleConnections() + }); ok { + tr.CloseIdleConnections() + } +} + +// UnAuthenticate removes the authentication from the Connection. +func (c *Connection) UnAuthenticate() { + c.authLock.Lock() + c.StorageUrl = "" + c.AuthToken = "" + c.authLock.Unlock() +} + +// Authenticated returns a boolean to show if the current connection +// is authenticated. +// +// Doesn't actually check the credentials against the server. +func (c *Connection) Authenticated() bool { + if c.authLock == nil { + c.authLock = &sync.Mutex{} + } + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticated() +} + +// Internal version of Authenticated() +// +// Call with authLock held +func (c *Connection) authenticated() bool { + if c.StorageUrl == "" || c.AuthToken == "" { + return false + } + if c.Expires.IsZero() { + return true + } + timeUntilExpiry := c.Expires.Sub(time.Now()) + return timeUntilExpiry >= 60*time.Second +} + +// SwiftInfo contains the JSON object returned by Swift when the /info +// route is queried. 
The object contains, among others, the Swift version, +// the enabled middlewares and their configuration +type SwiftInfo map[string]interface{} + +func (i SwiftInfo) SupportsBulkDelete() bool { + _, val := i["bulk_delete"] + return val +} + +func (i SwiftInfo) SupportsSLO() bool { + _, val := i["slo"] + return val +} + +func (i SwiftInfo) SLOMinSegmentSize() int64 { + if slo, ok := i["slo"].(map[string]interface{}); ok { + val, _ := slo["min_segment_size"].(float64) + return int64(val) + } + return 1 +} + +// Discover Swift configuration by doing a request against /info +func (c *Connection) QueryInfo() (infos SwiftInfo, err error) { + infoUrl, err := url.Parse(c.StorageUrl) + if err != nil { + return nil, err + } + infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info") + resp, err := c.client.Get(infoUrl.String()) + if err == nil { + if resp.StatusCode != http.StatusOK { + drainAndClose(resp.Body, nil) + return nil, fmt.Errorf("Invalid status code for info request: %d", resp.StatusCode) + } + err = readJson(resp, &infos) + if err == nil { + c.authLock.Lock() + c.swiftInfo = infos + c.authLock.Unlock() + } + return infos, err + } + return nil, err +} + +func (c *Connection) cachedQueryInfo() (infos SwiftInfo, err error) { + c.authLock.Lock() + infos = c.swiftInfo + c.authLock.Unlock() + if infos == nil { + infos, err = c.QueryInfo() + if err != nil { + return + } + } + return infos, nil +} + +// RequestOpts contains parameters for Connection.storage. +type RequestOpts struct { + Container string + ObjectName string + Operation string + Parameters url.Values + Headers Headers + ErrorMap errorMap + NoResponse bool + Body io.Reader + Retries int + // if set this is called on re-authentication to refresh the targetUrl + OnReAuth func() (string, error) +} + +// Call runs a remote command on the targetUrl, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the targetUrl +// +// Returns a response or an error. If response is returned then +// the resp.Body must be read completely and +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// If "Content-Length" is set in p.Headers it will be used - this can +// be used to override the default chunked transfer encoding for +// uploads. +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +// +// This method is exported so extensions can call it. 
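+//
+// For example an extension could make a raw HEAD request against a
+// container like this (editor's sketch; error handling elided and the
+// container name is illustrative):
+//
+//	resp, headers, err := c.Call(c.StorageUrl, swift.RequestOpts{
+//		Container:  "mycontainer",
+//		Operation:  "HEAD",
+//		ErrorMap:   swift.ContainerErrorMap,
+//		NoResponse: true,
+//	})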
+func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) { + c.authLock.Lock() + c.setDefaults() + c.authLock.Unlock() + retries := p.Retries + if retries == 0 { + retries = c.Retries + } + var req *http.Request + for { + var authToken string + if targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth); err != nil { + return //authentication failure + } + var URL *url.URL + URL, err = url.Parse(targetUrl) + if err != nil { + return + } + if p.Container != "" { + URL.Path += "/" + p.Container + if p.ObjectName != "" { + URL.Path += "/" + p.ObjectName + } + } + if p.Parameters != nil { + URL.RawQuery = p.Parameters.Encode() + } + timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() + reader := p.Body + if reader != nil { + reader = newWatchdogReader(reader, c.Timeout, timer) + } + req, err = http.NewRequest(p.Operation, URL.String(), reader) + if err != nil { + return + } + if p.Headers != nil { + for k, v := range p.Headers { + // Set ContentLength in req if the user passed it in in the headers + if k == "Content-Length" { + req.ContentLength, err = strconv.ParseInt(v, 10, 64) + if err != nil { + err = fmt.Errorf("Invalid %q header %q: %v", k, v, err) + return + } + } else { + req.Header.Add(k, v) + } + } + } + req.Header.Add("User-Agent", c.UserAgent) + req.Header.Add("X-Auth-Token", authToken) + + _, hasCL := p.Headers["Content-Length"] + AddExpectAndTransferEncoding(req, hasCL) + + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 { + retries-- + continue + } + return + } + // Check to see if token has expired + if resp.StatusCode == 401 && retries > 0 { + drainAndClose(resp.Body, nil) + c.UnAuthenticate() + retries-- + } else { + break + } + } + + headers = readHeaders(resp) + if err = c.parseHeaders(resp, p.ErrorMap); err != nil { + return + } + if p.NoResponse { + drainAndClose(resp.Body, &err) + if err != nil { + return + } + } else { + // Cancel the request on timeout + cancel := func() { + cancelRequest(c.Transport, req) + } + // Wrap resp.Body to make it obey an idle timeout + resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel) + } + return +} + +// storage runs a remote command on a the storage url, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the storage url +// +// Returns a response or an error. If response is returned then +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) { + p.OnReAuth = func() (string, error) { + return c.StorageUrl, nil + } + c.authLock.Lock() + url := c.StorageUrl + c.authLock.Unlock() + return c.Call(url, p) +} + +// readLines reads the response into an array of strings. 
+// +// Closes the response when done +func readLines(resp *http.Response) (lines []string, err error) { + defer drainAndClose(resp.Body, &err) + reader := bufio.NewReader(resp.Body) + buffer := bytes.NewBuffer(make([]byte, 0, 128)) + var part []byte + var prefix bool + for { + if part, prefix, err = reader.ReadLine(); err != nil { + break + } + buffer.Write(part) + if !prefix { + lines = append(lines, buffer.String()) + buffer.Reset() + } + } + if err == io.EOF { + err = nil + } + return +} + +// readJson reads the response into the json type passed in +// +// Closes the response when done +func readJson(resp *http.Response, result interface{}) (err error) { + defer drainAndClose(resp.Body, &err) + decoder := json.NewDecoder(resp.Body) + return decoder.Decode(result) +} + +/* ------------------------------------------------------------ */ + +// ContainersOpts is options for Containers() and ContainerNames() +type ContainersOpts struct { + Limit int // For an integer value n, limits the number of results to at most n values. + Prefix string // Given a string value x, return container names matching the specified prefix. + Marker string // Given a string value x, return container names greater in value than the specified marker. + EndMarker string // Given a string value x, return container names less in value than the specified marker. + Headers Headers // Any additional HTTP headers - can be nil +} + +// parse the ContainerOpts +func (opts *ContainersOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + h = opts.Headers + } + return v, h +} + +// ContainerNames returns a slice of names of containers in this account. +func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + lines, err := readLines(resp) + return lines, err +} + +// Container contains information about a container +type Container struct { + Name string // Name of the container + Count int64 // Number of objects in the container + Bytes int64 // Total number of bytes used in the container +} + +// Containers returns a slice of structures with full information as +// described in Container. 
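+//
+// Typical use (editor's sketch, error handling elided):
+//
+//	containers, err := c.Containers(nil)
+//	for _, container := range containers {
+//		fmt.Printf("%s: %d objects, %d bytes\n", container.Name, container.Count, container.Bytes)
+//	}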
+func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) {
+	v, h := opts.parse()
+	v.Set("format", "json")
+	resp, _, err := c.storage(RequestOpts{
+		Operation:  "GET",
+		Parameters: v,
+		ErrorMap:   ContainerErrorMap,
+		Headers:    h,
+	})
+	if err != nil {
+		return nil, err
+	}
+	var containers []Container
+	err = readJson(resp, &containers)
+	return containers, err
+}
+
+// containersAllOpts makes a copy of opts if set or makes a new one and
+// overrides Limit and Marker
+func containersAllOpts(opts *ContainersOpts) *ContainersOpts {
+	var newOpts ContainersOpts
+	if opts != nil {
+		newOpts = *opts
+	}
+	if newOpts.Limit == 0 {
+		newOpts.Limit = allContainersLimit
+	}
+	newOpts.Marker = ""
+	return &newOpts
+}
+
+// ContainersAll is like Containers but it returns all the Containers
+//
+// It calls Containers multiple times using the Marker parameter
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) {
+	opts = containersAllOpts(opts)
+	containers := make([]Container, 0)
+	for {
+		newContainers, err := c.Containers(opts)
+		if err != nil {
+			return nil, err
+		}
+		containers = append(containers, newContainers...)
+		if len(newContainers) < opts.Limit {
+			break
+		}
+		opts.Marker = newContainers[len(newContainers)-1].Name
+	}
+	return containers, nil
+}
+
+// ContainerNamesAll is like ContainerNames but it returns all the container names
+//
+// It calls ContainerNames multiple times using the Marker parameter
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) {
+	opts = containersAllOpts(opts)
+	containers := make([]string, 0)
+	for {
+		newContainers, err := c.ContainerNames(opts)
+		if err != nil {
+			return nil, err
+		}
+		containers = append(containers, newContainers...)
+		if len(newContainers) < opts.Limit {
+			break
+		}
+		opts.Marker = newContainers[len(newContainers)-1]
+	}
+	return containers, nil
+}
+
+/* ------------------------------------------------------------ */
+
+// ObjectsOpts is options for Objects() and ObjectNames()
+type ObjectsOpts struct {
+	Limit int // For an integer value n, limits the number of results to at most n values.
+	Marker string // Given a string value x, return object names greater in value than the specified marker.
+	EndMarker string // Given a string value x, return object names less in value than the specified marker
+	Prefix string // For a string value x, causes the results to be limited to object names beginning with the substring x.
+ Path string // For a string value x, return the object names nested in the pseudo path + Delimiter rune // For a character c, return all the object names nested in the container + Headers Headers // Any additional HTTP headers - can be nil + KeepMarker bool // Do not reset Marker when using ObjectsAll or ObjectNamesAll +} + +// parse reads values out of ObjectsOpts +func (opts *ObjectsOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Path != "" { + v.Set("path", opts.Path) + } + if opts.Delimiter != 0 { + v.Set("delimiter", string(opts.Delimiter)) + } + h = opts.Headers + } + return v, h +} + +// ObjectNames returns a slice of names of objects in a given container. +func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + return readLines(resp) +} + +// Object contains information about an object +type Object struct { + Name string `json:"name"` // object name + ContentType string `json:"content_type"` // eg application/directory + Bytes int64 `json:"bytes"` // size in bytes + ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server + LastModified time.Time // Last modified time converted to a time.Time + Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + SLOHash string `json:"slo_etag"` // MD5 hash of all segments' MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist + SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" + ObjectType ObjectType // type of this object +} + +// Objects returns a slice of Object with information about each +// object in the container. +// +// If Delimiter is set in the opts then PseudoDirectory may be set, +// with ContentType 'application/directory'. These are not real +// objects but represent directories of objects which haven't had an +// object created for them. +func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) { + v, h := opts.parse() + v.Set("format", "json") + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + var objects []Object + err = readJson(resp, &objects) + // Convert Pseudo directories and dates + for i := range objects { + object := &objects[i] + if object.SubDir != "" { + object.Name = object.SubDir + object.PseudoDirectory = true + object.ContentType = "application/directory" + } + if object.ServerLastModified != "" { + // 2012-11-11T14:49:47.887250 + // + // Remove fractional seconds if present. 
This
+			// then keeps it consistent with Object
+			// which can only return timestamps accurate
+			// to 1 second
+			//
+			// The TimeFormat will parse fractional
+			// seconds if desired though
+			datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0]
+			object.LastModified, err = time.Parse(TimeFormat, datetime)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if object.SLOHash != "" {
+			object.ObjectType = StaticLargeObjectType
+		}
+	}
+	return objects, err
+}
+
+// objectsAllOpts makes a copy of opts if set or makes a new one and
+// overrides Limit and Marker
+// Marker is not overridden if KeepMarker is set
+func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts {
+	var newOpts ObjectsOpts
+	if opts != nil {
+		newOpts = *opts
+	}
+	if newOpts.Limit == 0 {
+		newOpts.Limit = Limit
+	}
+	if !newOpts.KeepMarker {
+		newOpts.Marker = ""
+	}
+	return &newOpts
+}
+
+// ObjectsWalkFn is a closure defined by the caller to iterate through all objects
+//
+// Call Objects or ObjectNames from here with the *ObjectsOpts passed in
+//
+// Do whatever is required with the results then return them
+type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error)
+
+// ObjectsWalk is used to iterate through all the objects in chunks as
+// returned by Objects or ObjectNames using the Marker and Limit
+// parameters in the ObjectsOpts.
+//
+// Pass in a closure `walkFn` which calls Objects or ObjectNames with
+// the *ObjectsOpts passed to it and does something with the results.
+//
+// Errors will be returned from this function
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error {
+	opts = objectsAllOpts(opts, allObjectsChanLimit)
+	for {
+		objects, err := walkFn(opts)
+		if err != nil {
+			return err
+		}
+		var n int
+		var last string
+		switch objects := objects.(type) {
+		case []string:
+			n = len(objects)
+			if n > 0 {
+				last = objects[len(objects)-1]
+			}
+		case []Object:
+			n = len(objects)
+			if n > 0 {
+				last = objects[len(objects)-1].Name
+			}
+		default:
+			panic("Unknown type returned to ObjectsWalk")
+		}
+		if n < opts.Limit {
+			break
+		}
+		opts.Marker = last
+	}
+	return nil
+}
+
+// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice
+//
+// It calls Objects multiple times using the Marker parameter
+func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) {
+	objects := make([]Object, 0)
+	err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) {
+		newObjects, err := c.Objects(container, opts)
+		if err == nil {
+			objects = append(objects, newObjects...)
+		}
+		return newObjects, err
+	})
+	return objects, err
+}
+
+// ObjectNamesAll is like ObjectNames but it returns all the object names
+//
+// It calls ObjectNames multiple times using the Marker parameter. Marker is
+// reset unless KeepMarker is set
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) {
+	objects := make([]string, 0)
+	err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) {
+		newObjects, err := c.ObjectNames(container, opts)
+		if err == nil {
+			objects = append(objects, newObjects...)
+		}
+		return newObjects, err
+	})
+	return objects, err
+}
+
+// Account contains information about this account.
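+//
+// Example (editor's sketch, error handling elided):
+//
+//	info, _, err := c.Account()
+//	// info.BytesUsed, info.Containers and info.Objects now hold the
+//	// account totals parsed from the X-Account-* headers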
+type Account struct { + BytesUsed int64 // total number of bytes used + Containers int64 // total number of containers + Objects int64 // total number of objects +} + +// getInt64FromHeader is a helper function to decode int64 from header. +func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) { + value := resp.Header.Get(header) + result, err = strconv.ParseInt(value, 10, 64) + if err != nil { + err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err) + } + return +} + +// Account returns info about the account in an Account struct. +func (c *Connection) Account() (info Account, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Operation: "HEAD", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into a dict + // + // {'Accept-Ranges': 'bytes', + // 'Content-Length': '0', + // 'Date': 'Tue, 05 Jul 2011 16:37:06 GMT', + // 'X-Account-Bytes-Used': '316598182', + // 'X-Account-Container-Count': '4', + // 'X-Account-Object-Count': '1433'} + if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil { + return + } + if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil { + return + } + if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil { + return + } + return +} + +// AccountUpdate adds, replaces or remove account metadata. +// +// Add or update keys by mentioning them in the Headers. +// +// Remove keys by setting them to an empty string. +func (c *Connection) AccountUpdate(h Headers) error { + _, _, err := c.storage(RequestOpts{ + Operation: "POST", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerCreate creates a container. +// +// If you don't want to add Headers just pass in nil +// +// No error is returned if it already exists but the metadata if any will be updated. +func (c *Connection) ContainerCreate(container string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "PUT", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerDelete deletes a container. +// +// May return ContainerDoesNotExist or ContainerNotEmpty +func (c *Connection) ContainerDelete(container string) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "DELETE", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + return err +} + +// Container returns info about a single container including any +// metadata in the headers. +func (c *Connection) Container(container string) (info Container, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + Operation: "HEAD", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into the struct + info.Name = container + if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil { + return + } + if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil { + return + } + return +} + +// ContainerUpdate adds, replaces or removes container metadata. +// +// Add or update keys by mentioning them in the Metadata. +// +// Remove keys by setting them to an empty string. +// +// Container metadata can only be read with Container() not with Containers(). 
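+//
+// Example (editor's sketch) - add one metadata key and remove another
+// using the Metadata helpers from meta.go (the key names are illustrative):
+//
+//	m := swift.Metadata{"owner": "team-a", "obsolete-key": ""}
+//	err := c.ContainerUpdate("mycontainer", m.ContainerHeaders())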
+func (c *Connection) ContainerUpdate(container string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "POST", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ------------------------------------------------------------ + +// ObjectCreateFile represents a swift object open for writing +type ObjectCreateFile struct { + checkHash bool // whether we are checking the hash + pipeReader *io.PipeReader // pipe for the caller to use + pipeWriter *io.PipeWriter + hash hash.Hash // hash being build up as we go along + done chan struct{} // signals when the upload has finished + resp *http.Response // valid when done has signalled + err error // ditto + headers Headers // ditto +} + +// Write bytes to the object - see io.Writer +func (file *ObjectCreateFile) Write(p []byte) (n int, err error) { + n, err = file.pipeWriter.Write(p) + if err == io.ErrClosedPipe { + if file.err != nil { + return 0, file.err + } + return 0, newError(500, "Write on closed file") + } + if err == nil && file.checkHash { + _, _ = file.hash.Write(p) + } + return +} + +// Close the object and checks the md5sum if it was required. +// +// Also returns any other errors from the server (eg container not +// found) so it is very important to check the errors on this method. +func (file *ObjectCreateFile) Close() error { + // Close the body + err := file.pipeWriter.Close() + if err != nil { + return err + } + + // Wait for the HTTP operation to complete + <-file.done + + // Check errors + if file.err != nil { + return file.err + } + if file.checkHash { + receivedMd5 := strings.ToLower(file.headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + return ObjectCorrupted + } + } + return nil +} + +// Headers returns the response headers from the created object if the upload +// has been completed. The Close() method must be called on an ObjectCreateFile +// before this method. +func (file *ObjectCreateFile) Headers() (Headers, error) { + // error out if upload is not complete. + select { + case <-file.done: + default: + return nil, fmt.Errorf("Cannot get metadata, object upload failed or has not yet completed.") + } + return file.headers, nil +} + +// Check it satisfies the interface +var _ io.WriteCloser = &ObjectCreateFile{} + +// objectPutHeaders create a set of headers for a PUT +// +// It guesses the contentType from the objectName if it isn't set +// +// checkHash may be changed +func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers { + if contentType == "" { + contentType = mime.TypeByExtension(path.Ext(objectName)) + if contentType == "" { + contentType = "application/octet-stream" + } + } + // Meta stuff + extraHeaders := map[string]string{ + "Content-Type": contentType, + } + for key, value := range h { + extraHeaders[key] = value + } + if Hash != "" { + extraHeaders["Etag"] = Hash + *checkHash = false // the server will do it + } + return extraHeaders +} + +// ObjectCreate creates or updates the object in the container. It +// returns an io.WriteCloser you should write the contents to. You +// MUST call Close() on it and you MUST check the error return from +// Close(). +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. 
If it is wrong then it will return +// ObjectCorrupted on Close() +// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted on Close() if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + pipeReader, pipeWriter := io.Pipe() + file = &ObjectCreateFile{ + hash: md5.New(), + checkHash: checkHash, + pipeReader: pipeReader, + pipeWriter: pipeWriter, + done: make(chan struct{}), + } + // Run the PUT in the background piping it data + go func() { + opts := RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: pipeReader, + NoResponse: true, + ErrorMap: objectErrorMap, + } + file.resp, file.headers, file.err = c.storage(opts) + // Signal finished + pipeReader.Close() + close(file.done) + }() + return +} + +func (c *Connection) ObjectSymlinkCreate(container string, symlink string, targetAccount string, targetContainer string, targetObject string, targetEtag string) (headers Headers, err error) { + + EMPTY_MD5 := "d41d8cd98f00b204e9800998ecf8427e" + symHeaders := Headers{} + contents := bytes.NewBufferString("") + if targetAccount != "" { + symHeaders["X-Symlink-Target-Account"] = targetAccount + } + if targetEtag != "" { + symHeaders["X-Symlink-Target-Etag"] = targetEtag + } + symHeaders["X-Symlink-Target"] = fmt.Sprintf("%s/%s", targetContainer, targetObject) + _, err = c.ObjectPut(container, symlink, contents, true, EMPTY_MD5, "application/symlink", symHeaders) + return +} + +func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + hash := md5.New() + var body io.Reader = contents + if checkHash { + body = io.TeeReader(contents, hash) + } + _, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: body, + NoResponse: true, + ErrorMap: objectErrorMap, + Parameters: parameters, + }) + if err != nil { + return + } + if checkHash { + receivedMd5 := strings.ToLower(headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + return +} + +// ObjectPut creates or updates the path in the container from +// contents. contents should be an open io.Reader which will have all +// its contents read. +// +// This is a low level interface. +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted. 
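+//
+// For a streaming upload use ObjectCreate above; a minimal sketch of its
+// write/close discipline (container and object names are illustrative):
+//
+//	f, err := c.ObjectCreate("chunks", "obj1", true, "", "", nil)
+//	if err != nil {
+//		return err
+//	}
+//	_, err = io.Copy(f, src) // stream the contents
+//	if closeErr := f.Close(); err == nil {
+//		err = closeErr // Close reports upload and MD5 errors
+//	}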
+// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { + return c.objectPut(container, objectName, contents, checkHash, Hash, contentType, h, nil) +} + +// ObjectPutBytes creates an object from a []byte in a container. +// +// This is a simplified interface which checks the MD5. +func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { + buf := bytes.NewBuffer(contents) + h := Headers{"Content-Length": strconv.Itoa(len(contents))} + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) + return +} + +// ObjectPutString creates an object from a string in a container. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { + buf := strings.NewReader(contents) + h := Headers{"Content-Length": strconv.Itoa(len(contents))} + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) + return +} + +// ObjectOpenFile represents a swift object open for reading +type ObjectOpenFile struct { + connection *Connection // stored copy of Connection used in Open + container string // stored copy of container used in Open + objectName string // stored copy of objectName used in Open + headers Headers // stored copy of headers used in Open + resp *http.Response // http connection + body io.Reader // read data from this + checkHash bool // true if checking MD5 + hash hash.Hash // currently accumulating MD5 + bytes int64 // number of bytes read on this connection + eof bool // whether we have read end of file + pos int64 // current position when reading + lengthOk bool // whether length is valid + length int64 // length of the object if read + seeked bool // whether we have seeked this file or not + overSeeked bool // set if we have seeked to the end or beyond +} + +// Read bytes from the object - see io.Reader +func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { + if file.overSeeked { + return 0, io.EOF + } + n, err = file.body.Read(p) + file.bytes += int64(n) + file.pos += int64(n) + if err == io.EOF { + file.eof = true + } + return +} + +// Seek sets the offset for the next Read to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 +// means relative to the current offset, and 2 means relative to the +// end. Seek returns the new offset and an Error, if any. +// +// Seek uses HTTP Range headers which, if the file pointer is moved, +// will involve reopening the HTTP connection. +// +// Note that you can't seek to the end of a file or beyond; HTTP Range +// requests don't support the file pointer being outside the data, +// unlike os.File +// +// Seek(0, 1) will return the current file pointer. 
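+//
+// For example, to re-read the final 16 bytes of an object (a sketch which
+// assumes the object is at least 16 bytes long):
+//
+//	if _, err := file.Seek(-16, 2); err != nil {
+//		return err
+//	}
+//	tail := make([]byte, 16)
+//	_, err := io.ReadFull(file, tail)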
+func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { + file.overSeeked = false + switch whence { + case 0: // relative to start + newPos = offset + case 1: // relative to current + newPos = file.pos + offset + case 2: // relative to end + if !file.lengthOk { + return file.pos, newError(0, "Length of file unknown so can't seek from end") + } + newPos = file.length + offset + if offset >= 0 { + file.overSeeked = true + return + } + default: + panic("Unknown whence in ObjectOpenFile.Seek") + } + // If at correct position (quite likely), do nothing + if newPos == file.pos { + return + } + // Close the file... + file.seeked = true + err = file.Close() + if err != nil { + return + } + // ...and re-open with a Range header + if file.headers == nil { + file.headers = Headers{} + } + if newPos > 0 { + file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) + } else { + delete(file.headers, "Range") + } + newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) + if err != nil { + return + } + // Update the file + file.resp = newFile.resp + file.body = newFile.body + file.checkHash = false + file.pos = newPos + return +} + +// Length gets the objects content length either from a cached copy or +// from the server. +func (file *ObjectOpenFile) Length() (int64, error) { + if !file.lengthOk { + info, _, err := file.connection.Object(file.container, file.objectName) + file.length = info.Bytes + file.lengthOk = (err == nil) + return file.length, err + } + return file.length, nil +} + +// Close the object and checks the length and md5sum if it was +// required and all the object was read +func (file *ObjectOpenFile) Close() (err error) { + // Close the body at the end + defer checkClose(file.resp.Body, &err) + + // If not end of file or seeked then can't check anything + if !file.eof || file.seeked { + return + } + + // Check the MD5 sum if requested + if file.checkHash { + receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + + // Check to see we read the correct number of bytes + if file.lengthOk && file.length != file.bytes { + err = ObjectCorrupted + return + } + return +} + +// Check it satisfies the interfaces +var _ io.ReadCloser = &ObjectOpenFile{} +var _ io.Seeker = &ObjectOpenFile{} + +func (c *Connection) objectOpenBase(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { + var resp *http.Response + opts := RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "GET", + ErrorMap: objectErrorMap, + Headers: h, + Parameters: parameters, + } + resp, headers, err = c.storage(opts) + if err != nil { + return + } + // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set + if checkHash && headers.IsLargeObject() { + // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) + checkHash = false + } + file = &ObjectOpenFile{ + connection: c, + container: container, + objectName: objectName, + headers: h, + resp: resp, + checkHash: checkHash, + body: resp.Body, + } + if checkHash { + file.hash = md5.New() + file.body = io.TeeReader(resp.Body, file.hash) + } + // Read Content-Length + if resp.Header.Get("Content-Length") != "" { + file.length, err = getInt64FromHeader(resp, "Content-Length") + file.lengthOk = (err == nil) + } 
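+	// At this point reads go through file.body, which is tee'd into the MD5
+	// hash when checkHash is set, and file.lengthOk records whether a usable
+	// Content-Length header was seen.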
+ return +} + +func (c *Connection) objectOpen(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { + err = withLORetry(0, func() (Headers, int64, error) { + file, headers, err = c.objectOpenBase(container, objectName, checkHash, h, parameters) + if err != nil { + return headers, 0, err + } + return headers, file.length, nil + }) + return +} + +// ObjectOpen returns an ObjectOpenFile for reading the contents of +// the object. This satisfies the io.ReadCloser and the io.Seeker +// interfaces. +// +// You must call Close() on contents when finished +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. It +// will also check the length returned. No checking will be done if +// you don't read all the contents. +// +// Note that objects with X-Object-Manifest or X-Static-Large-Object +// set won't ever have their md5sum's checked as the md5sum reported +// on the object is actually the md5sum of the md5sums of the +// parts. This isn't very helpful to detect a corrupted download as +// the size of the parts aren't known without doing more operations. +// If you want to ensure integrity of an object with a manifest then +// you will need to download everything in the manifest separately. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { + return c.objectOpen(container, objectName, checkHash, h, nil) +} + +// ObjectGet gets the object into the io.Writer contents. +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { + file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) + if err != nil { + return + } + defer checkClose(file, &err) + _, err = io.Copy(contents, file) + return +} + +// ObjectGetBytes returns an object as a []byte. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.Bytes() + return +} + +// ObjectGetString returns an object as a string. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.String() + return +} + +// ObjectDelete deletes the object. 
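+//
+// For example, to delete an object while treating "already gone" as success
+// (container and object names are illustrative):
+//
+//	if err := c.ObjectDelete("chunks", "obj1"); err != nil && err != ObjectNotFound {
+//		return err
+//	}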
+// +// May return ObjectNotFound if the object isn't found +func (c *Connection) ObjectDelete(container string, objectName string) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "DELETE", + ErrorMap: objectErrorMap, + }) + return err +} + +// ObjectTempUrl returns a temporary URL for an object +func (c *Connection) ObjectTempUrl(container string, objectName string, secretKey string, method string, expires time.Time) string { + mac := hmac.New(sha1.New, []byte(secretKey)) + prefix, _ := url.Parse(c.StorageUrl) + body := fmt.Sprintf("%s\n%d\n%s/%s/%s", method, expires.Unix(), prefix.Path, container, objectName) + mac.Write([]byte(body)) + sig := hex.EncodeToString(mac.Sum(nil)) + return fmt.Sprintf("%s/%s/%s?temp_url_sig=%s&temp_url_expires=%d", c.StorageUrl, container, objectName, sig, expires.Unix()) +} + +// parseResponseStatus parses string like "200 OK" and returns Error. +// +// For status codes beween 200 and 299, this returns nil. +func parseResponseStatus(resp string, errorMap errorMap) error { + code := 0 + reason := resp + t := strings.SplitN(resp, " ", 2) + if len(t) == 2 { + ncode, err := strconv.Atoi(t[0]) + if err == nil { + code = ncode + reason = t[1] + } + } + if errorMap != nil { + if err, ok := errorMap[code]; ok { + return err + } + } + if 200 <= code && code <= 299 { + return nil + } + return newError(code, reason) +} + +// BulkDeleteResult stores results of BulkDelete(). +// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where the object was +// to be deleted, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkDeleteResult struct { + NumberNotFound int64 // # of objects not found. + NumberDeleted int64 // # of deleted objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) { + var buffer bytes.Buffer + for _, s := range objects { + u := url.URL{Path: s} + buffer.WriteString(u.String() + "\n") + } + resp, headers, err := c.storage(RequestOpts{ + Operation: "DELETE", + Parameters: url.Values{"bulk-delete": []string{"1"}}, + Headers: Headers{ + "Accept": "application/json", + "Content-Type": "text/plain", + "Content-Length": strconv.Itoa(buffer.Len()), + }, + ErrorMap: ContainerErrorMap, + Body: &buffer, + }) + if err != nil { + return + } + var jsonResult struct { + NotFound int64 `json:"Number Not Found"` + Status string `json:"Response Status"` + Errors [][]string + Deleted int64 `json:"Number Deleted"` + } + err = readJson(resp, &jsonResult) + if err != nil { + return + } + + err = parseResponseStatus(jsonResult.Status, objectErrorMap) + result.NumberNotFound = jsonResult.NotFound + result.NumberDeleted = jsonResult.Deleted + result.Headers = headers + el := make(map[string]error, len(jsonResult.Errors)) + for _, t := range jsonResult.Errors { + if len(t) != 2 { + continue + } + el[t[0]] = parseResponseStatus(t[1], objectErrorMap) + } + result.Errors = el + return +} + +// BulkDelete deletes multiple objectNames from container in one operation. +// +// Some servers may not accept bulk-delete requests since bulk-delete is +// an optional feature of swift - these will return the Forbidden error. 
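+//
+// A minimal sketch (container and object names are illustrative):
+//
+//	result, err := c.BulkDelete("chunks", []string{"obj1", "obj2"})
+//	if err == nil {
+//		fmt.Printf("deleted=%d notfound=%d\n", result.NumberDeleted, result.NumberNotFound)
+//	}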
+// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { + if len(objectNames) == 0 { + result.Errors = make(map[string]error) + return + } + fullPaths := make([]string, len(objectNames)) + for i, name := range objectNames { + fullPaths[i] = fmt.Sprintf("/%s/%s", container, name) + } + return c.doBulkDelete(fullPaths) +} + +// BulkUploadResult stores results of BulkUpload(). +// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where an object was +// to be created, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkUploadResult struct { + NumberCreated int64 // # of created objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +// BulkUpload uploads multiple files in one operation. +// +// uploadPath can be empty, a container name, or a pseudo-directory +// within a container. If uploadPath is empty, new containers may be +// automatically created. +// +// Files are read from dataStream. The format of the stream is specified +// by the format parameter. Available formats are: +// * UploadTar - Plain tar stream. +// * UploadTarGzip - Gzip compressed tar stream. +// * UploadTarBzip2 - Bzip2 compressed tar stream. +// +// Some servers may not accept bulk-upload requests since bulk-upload is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html +func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { + extraHeaders := Headers{"Accept": "application/json"} + for key, value := range h { + extraHeaders[key] = value + } + // The following code abuses Container parameter intentionally. + // The best fix might be to rename Container to UploadPath. + resp, headers, err := c.storage(RequestOpts{ + Container: uploadPath, + Operation: "PUT", + Parameters: url.Values{"extract-archive": []string{format}}, + Headers: extraHeaders, + ErrorMap: ContainerErrorMap, + Body: dataStream, + }) + if err != nil { + return + } + // Detect old servers which don't support this feature + if headers["Content-Type"] != "application/json" { + err = Forbidden + return + } + var jsonResult struct { + Created int64 `json:"Number Files Created"` + Status string `json:"Response Status"` + Errors [][]string + } + err = readJson(resp, &jsonResult) + if err != nil { + return + } + + err = parseResponseStatus(jsonResult.Status, objectErrorMap) + result.NumberCreated = jsonResult.Created + result.Headers = headers + el := make(map[string]error, len(jsonResult.Errors)) + for _, t := range jsonResult.Errors { + if len(t) != 2 { + continue + } + el[t[0]] = parseResponseStatus(t[1], objectErrorMap) + } + result.Errors = el + return +} + +// Object returns info about a single object including any metadata in the header. +// +// May return ObjectNotFound. +// +// Use headers.ObjectMetadata() to read the metadata in the Headers. 
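+//
+// For example (container and object names are illustrative):
+//
+//	info, headers, err := c.Object("chunks", "obj1")
+//	if err == nil {
+//		fmt.Println(info.Bytes, info.ContentType, headers.ObjectMetadata())
+//	}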
+func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) { + err = withLORetry(0, func() (Headers, int64, error) { + info, headers, err = c.objectBase(container, objectName) + if err != nil { + return headers, 0, err + } + return headers, info.Bytes, nil + }) + return +} + +func (c *Connection) objectBase(container string, objectName string) (info Object, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "HEAD", + ErrorMap: objectErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into the struct + // HTTP/1.1 200 OK + // Date: Thu, 07 Jun 2010 20:59:39 GMT + // Server: Apache + // Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT + // ETag: 8a964ee2a5e88be344f36c22562a6486 + // Content-Length: 512000 + // Content-Type: text/plain; charset=UTF-8 + // X-Object-Meta-Meat: Bacon + // X-Object-Meta-Fruit: Bacon + // X-Object-Meta-Veggie: Bacon + // X-Object-Meta-Dairy: Bacon + info.Name = objectName + info.ContentType = resp.Header.Get("Content-Type") + if resp.Header.Get("Content-Length") != "" { + if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil { + return + } + } + // Currently ceph doesn't return a Last-Modified header for DLO manifests without any segments + // See ceph http://tracker.ceph.com/issues/15812 + if resp.Header.Get("Last-Modified") != "" { + info.ServerLastModified = resp.Header.Get("Last-Modified") + if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil { + return + } + } + + info.Hash = resp.Header.Get("Etag") + if resp.Header.Get("X-Object-Manifest") != "" { + info.ObjectType = DynamicLargeObjectType + } else if resp.Header.Get("X-Static-Large-Object") != "" { + info.ObjectType = StaticLargeObjectType + } + + return +} + +// ObjectUpdate adds, replaces or removes object metadata. +// +// Add or Update keys by mentioning them in the Metadata. Use +// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your +// Metadata to and from normal HTTP headers. +// +// This removes all metadata previously added to the object and +// replaces it with that passed in so to delete keys, just don't +// mention them the headers you pass in. +// +// Object metadata can only be read with Object() not with Objects(). +// +// This can also be used to set headers not already assigned such as +// X-Delete-At or X-Delete-After for expiring objects. +// +// You cannot use this to change any of the object's other headers +// such as Content-Type, ETag, etc. +// +// Refer to copying an object when you need to update metadata or +// other headers such as Content-Type or CORS headers. +// +// May return ObjectNotFound. +func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "POST", + ErrorMap: objectErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// urlPathEscape escapes URL path the in string using URL escaping rules +// +// This mimics url.PathEscape which only available from go 1.8 +func urlPathEscape(in string) string { + var u url.URL + u.Path = in + return u.String() +} + +// ObjectCopy does a server side copy of an object to a new position +// +// All metadata is preserved. If metadata is set in the headers then +// it overrides the old metadata on the copied object. 
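+//
+// For example, a same-container copy that also rewrites a metadata key
+// (container, object names and the key are illustrative):
+//
+//	h := Headers{"X-Object-Meta-Owner": "loki"}
+//	_, err := c.ObjectCopy("chunks", "obj1", "chunks", "obj1-copy", h)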
+// +// The destination container must exist before the copy. +// +// You can use this to copy an object to itself - this is the only way +// to update the content type of an object. +func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) { + // Meta stuff + extraHeaders := map[string]string{ + "Destination": urlPathEscape(dstContainer + "/" + dstObjectName), + } + for key, value := range h { + extraHeaders[key] = value + } + _, headers, err = c.storage(RequestOpts{ + Container: srcContainer, + ObjectName: srcObjectName, + Operation: "COPY", + ErrorMap: objectErrorMap, + NoResponse: true, + Headers: extraHeaders, + }) + return +} + +// ObjectMove does a server side move of an object to a new position +// +// This is a convenience method which calls ObjectCopy then ObjectDelete +// +// All metadata is preserved. +// +// The destination container must exist before the copy. +func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) { + _, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil) + if err != nil { + return + } + return c.ObjectDelete(srcContainer, srcObjectName) +} + +// ObjectUpdateContentType updates the content type of an object +// +// This is a convenience method which calls ObjectCopy +// +// All other metadata is preserved. +func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) { + h := Headers{"Content-Type": contentType} + _, err = c.ObjectCopy(container, objectName, container, objectName, h) + return +} + +// ------------------------------------------------------------ + +// VersionContainerCreate is a helper method for creating and enabling version controlled containers. +// +// It builds the current object container, the non-current object version container, and enables versioning. +// +// If the server doesn't support versioning then it will return +// Forbidden however it will have created both the containers at that point. +func (c *Connection) VersionContainerCreate(current, version string) error { + if err := c.ContainerCreate(version, nil); err != nil { + return err + } + if err := c.ContainerCreate(current, nil); err != nil { + return err + } + if err := c.VersionEnable(current, version); err != nil { + return err + } + return nil +} + +// VersionEnable enables versioning on the current container with version as the tracking container. +// +// May return Forbidden if this isn't supported by the server +func (c *Connection) VersionEnable(current, version string) error { + h := Headers{"X-Versions-Location": version} + if err := c.ContainerUpdate(current, h); err != nil { + return err + } + // Check to see if the header was set properly + _, headers, err := c.Container(current) + if err != nil { + return err + } + // If failed to set versions header, return Forbidden as the server doesn't support this + if headers["X-Versions-Location"] != version { + return Forbidden + } + return nil +} + +// VersionDisable disables versioning on the current container. +func (c *Connection) VersionDisable(current string) error { + h := Headers{"X-Versions-Location": ""} + if err := c.ContainerUpdate(current, h); err != nil { + return err + } + return nil +} + +// VersionObjectList returns a list of older versions of the object. 
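+//
+// For example, given containers set up with
+// VersionContainerCreate("chunks", "chunks-versions"), the older versions of
+// an object are listed with (names are illustrative):
+//
+//	versions, err := c.VersionObjectList("chunks-versions", "obj1")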
+//
+// Objects are returned in the format <length><object_name>/<timestamp>
+func (c *Connection) VersionObjectList(version, object string) ([]string, error) {
+	opts := &ObjectsOpts{
+		// <3-character zero-padded hexadecimal character length><object name>/
+		Prefix: fmt.Sprintf("%03x", len(object)) + object + "/",
+	}
+	return c.ObjectNames(version, opts)
+}
diff --git a/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/ncw/swift/timeout_reader.go
new file mode 100644
index 000000000000..88ae733281e1
--- /dev/null
+++ b/vendor/github.com/ncw/swift/timeout_reader.go
@@ -0,0 +1,59 @@
+package swift
+
+import (
+	"io"
+	"time"
+)
+
+// An io.ReadCloser which obeys an idle timeout
+type timeoutReader struct {
+	reader  io.ReadCloser
+	timeout time.Duration
+	cancel  func()
+}
+
+// Returns a wrapper around the reader which obeys an idle
+// timeout. The cancel function is called if the timeout happens
+func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
+	return &timeoutReader{
+		reader:  reader,
+		timeout: timeout,
+		cancel:  cancel,
+	}
+}
+
+// Read reads up to len(p) bytes into p
+//
+// Waits at most for timeout for the read to complete otherwise returns a timeout
+func (t *timeoutReader) Read(p []byte) (int, error) {
+	// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
+	// Do the read in the background
+	type result struct {
+		n   int
+		err error
+	}
+	done := make(chan result, 1)
+	go func() {
+		n, err := t.reader.Read(p)
+		done <- result{n, err}
+	}()
+	// Wait for the read or the timeout
+	timer := time.NewTimer(t.timeout)
+	defer timer.Stop()
+	select {
+	case r := <-done:
+		return r.n, r.err
+	case <-timer.C:
+		t.cancel()
+		return 0, TimeoutError
+	}
+	panic("unreachable") // for Go 1.0
+}
+
+// Close closes the underlying reader
+func (t *timeoutReader) Close() error {
+	return t.reader.Close()
+}
+
+// Check it satisfies the interface
+var _ io.ReadCloser = &timeoutReader{}
diff --git a/vendor/github.com/ncw/swift/travis_realserver.sh b/vendor/github.com/ncw/swift/travis_realserver.sh
new file mode 100644
index 000000000000..970e94c0d1f4
--- /dev/null
+++ b/vendor/github.com/ncw/swift/travis_realserver.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then
+  exit 0
+fi
+
+if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
+  echo "Running tests pointing to Rackspace"
+  export SWIFT_API_KEY=$RACKSPACE_APIKEY
+  export SWIFT_API_USER=$RACKSPACE_USER
+  export SWIFT_AUTH_URL=$RACKSPACE_AUTH
+  go test ./...
+fi
+
+if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! 
-z "${MEMSET_APIKEY}" ]; then + echo "Running tests pointing to Memset" + export SWIFT_API_KEY=$MEMSET_APIKEY + export SWIFT_API_USER=$MEMSET_USER + export SWIFT_AUTH_URL=$MEMSET_AUTH + go test +fi diff --git a/vendor/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/ncw/swift/watchdog_reader.go new file mode 100644 index 000000000000..2714c9e1a475 --- /dev/null +++ b/vendor/github.com/ncw/swift/watchdog_reader.go @@ -0,0 +1,55 @@ +package swift + +import ( + "io" + "time" +) + +var watchdogChunkSize = 1 << 20 // 1 MiB + +// An io.Reader which resets a watchdog timer whenever data is read +type watchdogReader struct { + timeout time.Duration + reader io.Reader + timer *time.Timer + chunkSize int +} + +// Returns a new reader which will kick the watchdog timer whenever data is read +func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { + return &watchdogReader{ + timeout: timeout, + reader: reader, + timer: timer, + chunkSize: watchdogChunkSize, + } +} + +// Read reads up to len(p) bytes into p +func (t *watchdogReader) Read(p []byte) (int, error) { + //read from underlying reader in chunks not larger than t.chunkSize + //while resetting the watchdog timer before every read; the small chunk + //size ensures that the timer does not fire when reading a large amount of + //data from a slow connection + start := 0 + end := len(p) + for start < end { + length := end - start + if length > t.chunkSize { + length = t.chunkSize + } + + resetTimer(t.timer, t.timeout) + n, err := t.reader.Read(p[start : start+length]) + start += n + if n == 0 || err != nil { + return start, err + } + } + + resetTimer(t.timer, t.timeout) + return start, nil +} + +// Check it satisfies the interface +var _ io.Reader = &watchdogReader{} diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go new file mode 100644 index 000000000000..676c3f7d59e0 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go @@ -0,0 +1,320 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package swift implements common object storage abstractions against OpenStack swift APIs. +package swift + +import ( + "context" + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/go-kit/kit/log" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + "github.com/gophercloud/gophercloud/pagination" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" + "gopkg.in/yaml.v2" +) + +// DirDelim is the delimiter used to model a directory structure in an object store bucket. 
+const DirDelim = "/" + +type SwiftConfig struct { + AuthUrl string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserId string `yaml:"user_id"` + Password string `yaml:"password"` + DomainId string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` +} + +type Container struct { + logger log.Logger + client *gophercloud.ServiceClient + name string +} + +func NewContainer(logger log.Logger, conf []byte) (*Container, error) { + sc, err := parseConfig(conf) + if err != nil { + return nil, err + } + + authOpts, err := authOptsFromConfig(sc) + if err != nil { + return nil, err + } + + provider, err := openstack.AuthenticatedClient(authOpts) + if err != nil { + return nil, err + } + + client, err := openstack.NewObjectStorageV1(provider, gophercloud.EndpointOpts{ + Region: sc.RegionName, + }) + if err != nil { + return nil, err + } + + return &Container{ + logger: logger, + client: client, + name: sc.ContainerName, + }, nil +} + +// Name returns the container name for swift. +func (c *Container) Name() string { + return c.name +} + +// Iter calls f for each entry in the given directory. The argument to f is the full +// object name including the prefix of the inspected directory. +func (c *Container) Iter(ctx context.Context, dir string, f func(string) error) error { + // Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the + // object itself as one prefix item. + if dir != "" { + dir = strings.TrimSuffix(dir, DirDelim) + DirDelim + } + + options := &objects.ListOpts{Full: false, Prefix: dir, Delimiter: DirDelim} + return objects.List(c.client, c.name, options).EachPage(func(page pagination.Page) (bool, error) { + objectNames, err := objects.ExtractNames(page) + if err != nil { + return false, err + } + for _, objectName := range objectNames { + if err := f(objectName); err != nil { + return false, err + } + } + + return true, nil + }) +} + +// Get returns a reader for the given object name. +func (c *Container) Get(ctx context.Context, name string) (io.ReadCloser, error) { + if name == "" { + return nil, errors.New("error, empty container name passed") + } + response := objects.Download(c.client, c.name, name, nil) + return response.Body, response.Err +} + +// GetRange returns a new range reader for the given object name and range. +func (c *Container) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + options := objects.DownloadOpts{ + Newest: true, + Range: fmt.Sprintf("bytes=%d-%d", off, off+length-1), + } + response := objects.Download(c.client, c.name, name, options) + return response.Body, response.Err +} + +// ObjectSize returns the size of the specified object. +func (c *Container) ObjectSize(ctx context.Context, name string) (uint64, error) { + response := objects.Get(c.client, c.name, name, nil) + headers, err := response.Extract() + if err != nil { + return 0, err + } + return uint64(headers.ContentLength), nil +} + +// Exists checks if the given object exists. 
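+//
+// For example (the object name is illustrative):
+//
+//	ok, err := c.Exists(ctx, "chunks/000001")
+//	if err == nil && !ok {
+//		// the object is missing
+//	}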
+func (c *Container) Exists(ctx context.Context, name string) (bool, error) {
+	err := objects.Get(c.client, c.name, name, nil).Err
+	if err == nil {
+		return true, nil
+	}
+
+	if _, ok := err.(gophercloud.ErrDefault404); ok {
+		return false, nil
+	}
+
+	return false, err
+}
+
+// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
+func (c *Container) IsObjNotFoundErr(err error) bool {
+	_, ok := err.(gophercloud.ErrDefault404)
+	return ok
+}
+
+// Upload writes the contents of the reader as an object into the container.
+func (c *Container) Upload(ctx context.Context, name string, r io.Reader) error {
+	options := &objects.CreateOpts{Content: r}
+	res := objects.Create(c.client, c.name, name, options)
+	return res.Err
+}
+
+// Delete removes the object with the given name.
+func (c *Container) Delete(ctx context.Context, name string) error {
+	return objects.Delete(c.client, c.name, name, nil).Err
+}
+
+func (*Container) Close() error {
+	// Nothing to close.
+	return nil
+}
+
+func parseConfig(conf []byte) (*SwiftConfig, error) {
+	var sc SwiftConfig
+	err := yaml.UnmarshalStrict(conf, &sc)
+	return &sc, err
+}
+
+func authOptsFromConfig(sc *SwiftConfig) (gophercloud.AuthOptions, error) {
+	authOpts := gophercloud.AuthOptions{
+		IdentityEndpoint: sc.AuthUrl,
+		Username:         sc.Username,
+		UserID:           sc.UserId,
+		Password:         sc.Password,
+		DomainID:         sc.DomainId,
+		DomainName:       sc.DomainName,
+		TenantID:         sc.ProjectID,
+		TenantName:       sc.ProjectName,
+
+		// Allow Gophercloud to re-authenticate automatically.
+		AllowReauth: true,
+	}
+
+	// Support for cross-domain scoping (user in different domain than project).
+	// If a userDomainName or userDomainID is given, the user is scoped to this domain.
+	switch {
+	case sc.UserDomainName != "":
+		authOpts.DomainName = sc.UserDomainName
+	case sc.UserDomainID != "":
+		authOpts.DomainID = sc.UserDomainID
+	}
+
+	// A token can be scoped to a domain or project.
+	// The project can be in another domain than the user, which is indicated by setting either projectDomainName or projectDomainID.
+	switch {
+	case sc.ProjectDomainName != "":
+		authOpts.Scope = &gophercloud.AuthScope{
+			DomainName: sc.ProjectDomainName,
+		}
+	case sc.ProjectDomainID != "":
+		authOpts.Scope = &gophercloud.AuthScope{
+			DomainID: sc.ProjectDomainID,
+		}
+	}
+	if authOpts.Scope != nil {
+		switch {
+		case sc.ProjectName != "":
+			authOpts.Scope.ProjectName = sc.ProjectName
+		case sc.ProjectID != "":
+			authOpts.Scope.ProjectID = sc.ProjectID
+		}
+	}
+	return authOpts, nil
+}
+
+func (c *Container) createContainer(name string) error {
+	return containers.Create(c.client, name, nil).Err
+}
+
+func (c *Container) deleteContainer(name string) error {
+	return containers.Delete(c.client, name).Err
+}
+
+func configFromEnv() SwiftConfig {
+	c := SwiftConfig{
+		AuthUrl:           os.Getenv("OS_AUTH_URL"),
+		Username:          os.Getenv("OS_USERNAME"),
+		Password:          os.Getenv("OS_PASSWORD"),
+		RegionName:        os.Getenv("OS_REGION_NAME"),
+		ContainerName:     os.Getenv("OS_CONTAINER_NAME"),
+		ProjectID:         os.Getenv("OS_PROJECT_ID"),
+		ProjectName:       os.Getenv("OS_PROJECT_NAME"),
+		UserDomainID:      os.Getenv("OS_USER_DOMAIN_ID"),
+		UserDomainName:    os.Getenv("OS_USER_DOMAIN_NAME"),
+		ProjectDomainID:   os.Getenv("OS_PROJECT_DOMAIN_ID"),
+		ProjectDomainName: os.Getenv("OS_PROJECT_DOMAIN_NAME"),
+	}
+
+	return c
+}
+
+// validateForTests checks that the config options needed for tests are set. 
+func validateForTests(conf SwiftConfig) error {
+	if conf.AuthUrl == "" ||
+		conf.Username == "" ||
+		conf.Password == "" ||
+		(conf.ProjectName == "" && conf.ProjectID == "") ||
+		conf.RegionName == "" {
+		return errors.New("insufficient swift test configuration information")
+	}
+	return nil
+}
+
+// NewTestContainer creates a test objstore client that creates a temporary
+// container before returning. The returned close function empties and deletes
+// the container.
+func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) {
+	config := configFromEnv()
+	if err := validateForTests(config); err != nil {
+		return nil, nil, err
+	}
+	containerConfig, err := yaml.Marshal(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c, err := NewContainer(log.NewNopLogger(), containerConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if config.ContainerName != "" {
+		if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
+			return nil, nil, errors.New("OS_CONTAINER_NAME is defined. Normally these tests create a temporary container " +
+				"and delete it after the test. Unset the OS_CONTAINER_NAME env variable to use the default logic. If you really want to run " +
+				"tests against the provided container, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: that container " +
+				"needs to be cleared manually, which means it is only useful for running one test at a time. This is due " +
+				"to safety (accidentally pointing at a prod container for tests) as well as swift not being fully strongly consistent.")
+		}
+
+		if err := c.Iter(context.Background(), "", func(f string) error {
+			return errors.Errorf("container %s is not empty", config.ContainerName)
+		}); err != nil {
+			return nil, nil, errors.Wrapf(err, "swift check container %s", config.ContainerName)
+		}
+
+		t.Log("WARNING. Reusing", config.ContainerName, "container for Swift tests. 
Manual cleanup afterwards is required") + return c, func() {}, nil + } + + tmpContainerName := objstore.CreateTemporaryTestBucketName(t) + + if err := c.createContainer(tmpContainerName); err != nil { + return nil, nil, err + } + + c.name = tmpContainerName + t.Log("created temporary container for swift tests with name", tmpContainerName) + + return c, func() { + objstore.EmptyBucket(t, context.Background(), c) + if err := c.deleteContainer(tmpContainerName); err != nil { + t.Logf("deleting container %s failed: %s", tmpContainerName, err) + } + }, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8406a409c569..e2179fa00d37 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -138,7 +138,7 @@ github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog github.com/coreos/pkg/dlopen -# github.com/cortexproject/cortex v1.0.1-0.20200423101820-36496a074bc4 +# github.com/cortexproject/cortex v1.0.1-0.20200424135841-64fb9ad94a38 github.com/cortexproject/cortex/pkg/alertmanager github.com/cortexproject/cortex/pkg/alertmanager/alerts github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb @@ -153,6 +153,7 @@ github.com/cortexproject/cortex/pkg/chunk/encoding github.com/cortexproject/cortex/pkg/chunk/gcp github.com/cortexproject/cortex/pkg/chunk/local github.com/cortexproject/cortex/pkg/chunk/objectclient +github.com/cortexproject/cortex/pkg/chunk/openstack github.com/cortexproject/cortex/pkg/chunk/purger github.com/cortexproject/cortex/pkg/chunk/storage github.com/cortexproject/cortex/pkg/chunk/testutils @@ -393,6 +394,9 @@ github.com/gophercloud/gophercloud/openstack/compute/v2/servers github.com/gophercloud/gophercloud/openstack/identity/v2/tenants github.com/gophercloud/gophercloud/openstack/identity/v2/tokens github.com/gophercloud/gophercloud/openstack/identity/v3/tokens +github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts +github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers +github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects github.com/gophercloud/gophercloud/openstack/utils github.com/gophercloud/gophercloud/pagination # github.com/gorilla/mux v1.7.1 @@ -519,6 +523,8 @@ github.com/modern-go/reflect2 github.com/morikuni/aec # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/mwitkow/go-conntrack +# github.com/ncw/swift v1.0.50 +github.com/ncw/swift # github.com/oklog/run v1.0.0 github.com/oklog/run # github.com/oklog/ulid v1.3.1 @@ -712,6 +718,7 @@ github.com/thanos-io/thanos/pkg/objstore/azure github.com/thanos-io/thanos/pkg/objstore/filesystem github.com/thanos-io/thanos/pkg/objstore/gcs github.com/thanos-io/thanos/pkg/objstore/s3 +github.com/thanos-io/thanos/pkg/objstore/swift github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/runutil github.com/thanos-io/thanos/pkg/shipper From 2e78fc37929338db2954c9d1ddc5b992ff7f6355 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 24 Apr 2020 10:46:27 -0400 Subject: [PATCH 2/3] Fixes a test. 
Signed-off-by: Cyril Tovena --- pkg/logql/metrics_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go index c3374718f17e..913d54658d45 100644 --- a/pkg/logql/metrics_test.go +++ b/pkg/logql/metrics_test.go @@ -72,7 +72,7 @@ func TestLogSlowQuery(t *testing.T) { }) require.Equal(t, fmt.Sprintf( - "level=info org_id=foo trace_id=%s latency=slow query=\"{foo=\\\"bar\\\"} |= \\\"buzz\\\"\" query_type=filter range_type=range length=1h0m0s step=1m0s duration=25.25s status=200 throughput_mb=0.01 total_bytes_mb=0.01\n", + "level=info org_id=foo traceID=%s latency=slow query=\"{foo=\\\"bar\\\"} |= \\\"buzz\\\"\" query_type=filter range_type=range length=1h0m0s step=1m0s duration=25.25s status=200 throughput_mb=0.01 total_bytes_mb=0.01\n", sp.Context().(jaeger.SpanContext).SpanID().String(), ), buf.String()) From 231e645908d9e8d470ddd6689d8259e58ba29cec Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 24 Apr 2020 11:42:55 -0400 Subject: [PATCH 3/3] Update go.sum Signed-off-by: Cyril Tovena --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 9204ae3c20c1..4abfb118076f 100644 --- a/go.sum +++ b/go.sum @@ -170,8 +170,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= -github.com/cortexproject/cortex v1.0.1-0.20200423101820-36496a074bc4 h1:SNBpM6lX8ZjDsSrQWbxP1FRO8KXirnRwFvtcLA8+DCc= -github.com/cortexproject/cortex v1.0.1-0.20200423101820-36496a074bc4/go.mod h1:S2BogfHdb0YCo5Zly3vOEsqzsE7YXdumHBMRJkgDZm4= github.com/cortexproject/cortex v1.0.1-0.20200424135841-64fb9ad94a38 h1:zvaE5fX7A1ZcrAuXYxhXoDSVqI6Q1byZEqYAK+9KhAM= github.com/cortexproject/cortex v1.0.1-0.20200424135841-64fb9ad94a38/go.mod h1:CTLwVWnV5PfcLZqzZVe+0Sa69pRrhyT2d9g0ml5S9aQ= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=