From c80254c4f1ff07b328b3d2d2ce7707d5025df381 Mon Sep 17 00:00:00 2001 From: jkoberg Date: Tue, 15 Oct 2024 16:03:23 +0200 Subject: [PATCH 1/6] feat(ocis): remove ocm store Signed-off-by: jkoberg --- changelog/unreleased/remove-deprecations.md | 5 +++++ ocis-pkg/shared/shared_types.go | 3 +-- services/activitylog/pkg/command/server.go | 1 - services/activitylog/pkg/config/config.go | 5 ++--- services/eventhistory/README.md | 4 ---- services/eventhistory/pkg/command/server.go | 1 - services/eventhistory/pkg/config/config.go | 5 ++--- services/frontend/README.md | 4 ---- services/frontend/pkg/config/config.go | 3 +-- services/frontend/pkg/revaconfig/config.go | 1 - services/gateway/README.md | 4 ---- services/gateway/pkg/config/config.go | 6 ++---- services/gateway/pkg/revaconfig/config.go | 2 -- services/graph/README.md | 4 ---- services/graph/pkg/config/cache.go | 3 +-- services/graph/pkg/config/defaults/defaultconfig.go | 1 - services/graph/pkg/service/v0/service.go | 1 - services/ocs/README.md | 2 +- services/postprocessing/README.md | 4 ---- services/postprocessing/pkg/command/server.go | 1 - services/postprocessing/pkg/config/config.go | 5 ++--- services/proxy/README.md | 6 +----- services/proxy/pkg/command/server.go | 1 - services/proxy/pkg/config/config.go | 3 +-- services/proxy/pkg/config/defaults/defaultconfig.go | 1 - services/settings/README.md | 4 ---- services/settings/pkg/config/config.go | 3 +-- services/settings/pkg/store/metadata/cache.go | 2 -- services/storage-system/README.md | 4 ---- services/storage-system/pkg/config/config.go | 3 +-- services/storage-system/pkg/revaconfig/config.go | 1 - services/storage-users/README.md | 4 ---- services/storage-users/pkg/config/config.go | 6 ++---- services/storage-users/pkg/revaconfig/drivers.go | 10 ---------- services/userlog/README.md | 4 ---- services/userlog/pkg/command/server.go | 1 - services/userlog/pkg/config/config.go | 5 ++--- 37 files changed, 25 insertions(+), 98 deletions(-) create mode 100644 
changelog/unreleased/remove-deprecations.md diff --git a/changelog/unreleased/remove-deprecations.md b/changelog/unreleased/remove-deprecations.md new file mode 100644 index 00000000000..a2a1a5e1bd1 --- /dev/null +++ b/changelog/unreleased/remove-deprecations.md @@ -0,0 +1,5 @@ +Enhancement: Remove Deprecations + +Remove deprecated stores/caches/registries and envvars from the codebase. + +https://github.com/owncloud/ocis/pull/10305 diff --git a/ocis-pkg/shared/shared_types.go b/ocis-pkg/shared/shared_types.go index 2b8b6f5e9c8..4ed1df7a18f 100644 --- a/ocis-pkg/shared/shared_types.go +++ b/ocis-pkg/shared/shared_types.go @@ -57,11 +57,10 @@ type HTTPServiceTLS struct { type Cache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"OCIS_CACHE_STORE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` Table string `yaml:"table" env:"OCIS_CACHE_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL" desc:"Time to live for events in the store. 
The duration can be set as number followed by a unit identifier like s, m or h." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` AuthUsername string `yaml:"auth_username" env:"OCIS_CACHE_AUTH_USERNAME" desc:"The username to use for authentication. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"pre5.0"` AuthPassword string `yaml:"auth_password" env:"OCIS_CACHE_AUTH_PASSWORD" desc:"The password to use for authentication. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"pre5.0"` diff --git a/services/activitylog/pkg/command/server.go b/services/activitylog/pkg/command/server.go index 9c018ecdbcd..67fa7ad9c6b 100644 --- a/services/activitylog/pkg/command/server.go +++ b/services/activitylog/pkg/command/server.go @@ -79,7 +79,6 @@ func Server(cfg *config.Config) *cli.Command { evStore := store.Create( store.Store(cfg.Store.Store), store.TTL(cfg.Store.TTL), - store.Size(cfg.Store.Size), microstore.Nodes(cfg.Store.Nodes...), microstore.Database(cfg.Store.Database), microstore.Table(cfg.Store.Table), diff --git a/services/activitylog/pkg/config/config.go b/services/activitylog/pkg/config/config.go index 912c0d35217..1bfff92c8a8 100644 --- a/services/activitylog/pkg/config/config.go +++ b/services/activitylog/pkg/config/config.go @@ -47,12 +47,11 @@ type Events struct { // Store configures the store to use type Store struct { - Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;ACTIVITYLOG_STORE" desc:"The type of the store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. 
See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;ACTIVITYLOG_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;ACTIVITYLOG_STORE" desc:"The type of the store. Supported values are: 'memory', 'nats-js-kv', 'redis-sentinel', 'noop'. See the text description for details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;ACTIVITYLOG_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"ACTIVITYLOG_STORE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` Table string `yaml:"table" env:"ACTIVITYLOG_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_PERSISTENT_STORE_TTL;ACTIVITYLOG_STORE_TTL" desc:"Time to live for events in the store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_PERSISTENT_STORE_SIZE;ACTIVITYLOG_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not explicitly set as default." 
introductionVersion:"pre5.0"` AuthUsername string `yaml:"username" env:"OCIS_PERSISTENT_STORE_AUTH_USERNAME;ACTIVITYLOG_STORE_AUTH_USERNAME" desc:"The username to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_PERSISTENT_STORE_AUTH_PASSWORD;ACTIVITYLOG_STORE_AUTH_PASSWORD" desc:"The password to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` } diff --git a/services/eventhistory/README.md b/services/eventhistory/README.md index ec917157184..1648d166784 100644 --- a/services/eventhistory/README.md +++ b/services/eventhistory/README.md @@ -17,10 +17,6 @@ The `eventhistory` service stores each consumed event via the configured store i - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. 
diff --git a/services/eventhistory/pkg/command/server.go b/services/eventhistory/pkg/command/server.go index 7107f899756..15e0dfd932b 100644 --- a/services/eventhistory/pkg/command/server.go +++ b/services/eventhistory/pkg/command/server.go @@ -63,7 +63,6 @@ func Server(cfg *config.Config) *cli.Command { st := store.Create( store.Store(cfg.Store.Store), store.TTL(cfg.Store.TTL), - store.Size(cfg.Store.Size), microstore.Nodes(cfg.Store.Nodes...), microstore.Database(cfg.Store.Database), microstore.Table(cfg.Store.Table), diff --git a/services/eventhistory/pkg/config/config.go b/services/eventhistory/pkg/config/config.go index 087dfed6aa4..ded8be6b572 100644 --- a/services/eventhistory/pkg/config/config.go +++ b/services/eventhistory/pkg/config/config.go @@ -37,12 +37,11 @@ type GRPCConfig struct { // Store configures the store to use type Store struct { - Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;EVENTHISTORY_STORE" desc:"The type of the store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;EVENTHISTORY_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;EVENTHISTORY_STORE" desc:"The type of the store. Supported values are: 'memory', 'nats-js-kv', 'redis-sentinel', 'noop'. See the text description for details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;EVENTHISTORY_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. 
Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"EVENTHISTORY_STORE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` Table string `yaml:"table" env:"EVENTHISTORY_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_PERSISTENT_STORE_TTL;EVENTHISTORY_STORE_TTL" desc:"Time to live for events in the store. Defaults to '336h' (2 weeks). See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_PERSISTENT_STORE_SIZE;EVENTHISTORY_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived and used from the ocmem package though no explicit default was set." introductionVersion:"pre5.0"` AuthUsername string `yaml:"username" env:"OCIS_PERSISTENT_STORE_AUTH_USERNAME;EVENTHISTORY_STORE_AUTH_USERNAME" desc:"The username to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_PERSISTENT_STORE_AUTH_PASSWORD;EVENTHISTORY_STORE_AUTH_PASSWORD" desc:"The password to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` } diff --git a/services/frontend/README.md b/services/frontend/README.md index d66736f0b60..3705a68acd9 100644 --- a/services/frontend/README.md +++ b/services/frontend/README.md @@ -51,10 +51,6 @@ The `frontend` service can use a configured store via `FRONTEND_OCS_STAT_CACHE_S - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. 
- `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. diff --git a/services/frontend/pkg/config/config.go b/services/frontend/pkg/config/config.go index 0ccc7cf997d..626d85a57f7 100644 --- a/services/frontend/pkg/config/config.go +++ b/services/frontend/pkg/config/config.go @@ -129,11 +129,10 @@ type OCS struct { HomeNamespace string `yaml:"home_namespace" env:"FRONTEND_OCS_PERSONAL_NAMESPACE" desc:"Home namespace identifier." introductionVersion:"pre5.0"` AdditionalInfoAttribute string `yaml:"additional_info_attribute" env:"FRONTEND_OCS_ADDITIONAL_INFO_ATTRIBUTE" desc:"Additional information attribute for the user like {{.Mail}}." introductionVersion:"pre5.0"` StatCacheType string `yaml:"stat_cache_type" env:"OCIS_CACHE_STORE;FRONTEND_OCS_STAT_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - StatCacheNodes []string `yaml:"stat_cache_nodes" env:"OCIS_CACHE_STORE_NODES;FRONTEND_OCS_STAT_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." 
introductionVersion:"pre5.0"` + StatCacheNodes []string `yaml:"stat_cache_nodes" env:"OCIS_CACHE_STORE_NODES;FRONTEND_OCS_STAT_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` StatCacheDatabase string `yaml:"stat_cache_database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` StatCacheTable string `yaml:"stat_cache_table" env:"FRONTEND_OCS_STAT_CACHE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` StatCacheTTL time.Duration `yaml:"stat_cache_ttl" env:"OCIS_CACHE_TTL;FRONTEND_OCS_STAT_CACHE_TTL" desc:"Default time to live for user info in the cache. Only applied when access tokens has no expiration. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - StatCacheSize int `yaml:"stat_cache_size" env:"OCIS_CACHE_SIZE;FRONTEND_OCS_STAT_CACHE_SIZE" desc:"Max number of entries to hold in the cache." introductionVersion:"pre5.0"` StatCacheDisablePersistence bool `yaml:"stat_cache_disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;FRONTEND_OCS_STAT_CACHE_DISABLE_PERSISTENCE" desc:"Disable persistence of the cache. Only applies when using the 'nats-js-kv' store type. Defaults to false." introductionVersion:"5.0"` StatCacheAuthUsername string `yaml:"stat_cache_auth_username" env:"OCIS_CACHE_AUTH_USERNAME;FRONTEND_OCS_STAT_CACHE_AUTH_USERNAME" desc:"The username to use for authentication. Only applies when using the 'nats-js-kv' store type." introductionVersion:"5.0"` StatCacheAuthPassword string `yaml:"stat_cache_auth_password" env:"OCIS_CACHE_AUTH_PASSWORD;FRONTEND_OCS_STAT_CACHE_AUTH_PASSWORD" desc:"The password to use for authentication. 
Only applies when using the 'nats-js-kv' store type." introductionVersion:"5.0"` diff --git a/services/frontend/pkg/revaconfig/config.go b/services/frontend/pkg/revaconfig/config.go index 6ebd7ab4f49..e58a93c2522 100644 --- a/services/frontend/pkg/revaconfig/config.go +++ b/services/frontend/pkg/revaconfig/config.go @@ -168,7 +168,6 @@ func FrontendConfigFromStruct(cfg *config.Config, logger log.Logger) (map[string "cache_database": cfg.OCS.StatCacheDatabase, "cache_table": cfg.OCS.StatCacheTable, "cache_ttl": cfg.OCS.StatCacheTTL, - "cache_size": cfg.OCS.StatCacheSize, "cache_disable_persistence": cfg.OCS.StatCacheDisablePersistence, "cache_auth_username": cfg.OCS.StatCacheAuthUsername, "cache_auth_password": cfg.OCS.StatCacheAuthPassword, diff --git a/services/gateway/README.md b/services/gateway/README.md index c167cb33dd8..a49730bfaa7 100644 --- a/services/gateway/README.md +++ b/services/gateway/README.md @@ -15,10 +15,6 @@ Use `OCIS_CACHE_STORE` (`GATEWAY_PROVIDER_CACHE_STORE`, `GATEWAY_CREATE_HOME_CAC - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. 
diff --git a/services/gateway/pkg/config/config.go b/services/gateway/pkg/config/config.go index 3cdde5bc769..91047cebdcc 100644 --- a/services/gateway/pkg/config/config.go +++ b/services/gateway/pkg/config/config.go @@ -87,18 +87,16 @@ type StorageRegistry struct { // Cache holds cache config type Cache struct { ProviderCacheStore string `yaml:"provider_cache_store" env:"OCIS_CACHE_STORE;GATEWAY_PROVIDER_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - ProviderCacheNodes []string `yaml:"provider_cache_nodes" env:"OCIS_CACHE_STORE_NODES;GATEWAY_PROVIDER_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + ProviderCacheNodes []string `yaml:"provider_cache_nodes" env:"OCIS_CACHE_STORE_NODES;GATEWAY_PROVIDER_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` ProviderCacheDatabase string `yaml:"provider_cache_database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` ProviderCacheTTL time.Duration `yaml:"provider_cache_ttl" env:"OCIS_CACHE_TTL;GATEWAY_PROVIDER_CACHE_TTL" desc:"Default time to live for user info in the cache. Only applied when access tokens has no expiration. See the Environment Variable Types description for more details." 
introductionVersion:"pre5.0"` - ProviderCacheSize int `yaml:"provider_cache_size" env:"OCIS_CACHE_SIZE;GATEWAY_PROVIDER_CACHE_SIZE" desc:"The maximum quantity of items in the cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not explicitly set as default." introductionVersion:"pre5.0"` ProviderCacheDisablePersistence bool `yaml:"provider_cache_disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;GATEWAY_PROVIDER_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the provider cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` ProviderCacheAuthUsername string `yaml:"provider_cache_auth_username" env:"OCIS_CACHE_AUTH_USERNAME;GATEWAY_PROVIDER_CACHE_AUTH_USERNAME" desc:"The username to use for authentication. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` ProviderCacheAuthPassword string `yaml:"provider_cache_auth_password" env:"OCIS_CACHE_AUTH_PASSWORD;GATEWAY_PROVIDER_CACHE_AUTH_PASSWORD" desc:"The password to use for authentication. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` CreateHomeCacheStore string `yaml:"create_home_cache_store" env:"OCIS_CACHE_STORE;GATEWAY_CREATE_HOME_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - CreateHomeCacheNodes []string `yaml:"create_home_cache_nodes" env:"OCIS_CACHE_STORE_NODES;GATEWAY_CREATE_HOME_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." 
introductionVersion:"pre5.0"` + CreateHomeCacheNodes []string `yaml:"create_home_cache_nodes" env:"OCIS_CACHE_STORE_NODES;GATEWAY_CREATE_HOME_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` CreateHomeCacheDatabase string `yaml:"create_home_cache_database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` CreateHomeCacheTTL time.Duration `yaml:"create_home_cache_ttl" env:"OCIS_CACHE_TTL;GATEWAY_CREATE_HOME_CACHE_TTL" desc:"Default time to live for user info in the cache. Only applied when access tokens has no expiration. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - CreateHomeCacheSize int `yaml:"create_home_cache_size" env:"OCIS_CACHE_SIZE;GATEWAY_CREATE_HOME_CACHE_SIZE" desc:"The maximum quantity of items in the cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not explicitly set as default." introductionVersion:"pre5.0"` CreateHomeCacheDisablePersistence bool `yaml:"create_home_cache_disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;GATEWAY_CREATE_HOME_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the create home cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` CreateHomeCacheAuthUsername string `yaml:"create_home_cache_auth_username" env:"OCIS_CACHE_AUTH_USERNAME;GATEWAY_CREATE_HOME_CACHE_AUTH_USERNAME" desc:"The username to use for authentication. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"5.0"` CreateHomeCacheAuthPassword string `yaml:"create_home_cache_auth_password" env:"OCIS_CACHE_AUTH_PASSWORD;GATEWAY_CREATE_HOME_CACHE_AUTH_PASSWORD" desc:"The password to use for authentication. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` diff --git a/services/gateway/pkg/revaconfig/config.go b/services/gateway/pkg/revaconfig/config.go index f24fd87fc81..73d36b01169 100644 --- a/services/gateway/pkg/revaconfig/config.go +++ b/services/gateway/pkg/revaconfig/config.go @@ -71,7 +71,6 @@ func GatewayConfigFromStruct(cfg *config.Config, logger log.Logger) map[string]i "cache_database": cfg.Cache.ProviderCacheDatabase, "cache_table": "provider", "cache_ttl": cfg.Cache.ProviderCacheTTL, - "cache_size": cfg.Cache.ProviderCacheSize, "disable_persistence": cfg.Cache.ProviderCacheDisablePersistence, "cache_auth_username": cfg.Cache.ProviderCacheAuthUsername, "cache_auth_password": cfg.Cache.ProviderCacheAuthPassword, @@ -82,7 +81,6 @@ func GatewayConfigFromStruct(cfg *config.Config, logger log.Logger) map[string]i "cache_database": cfg.Cache.CreateHomeCacheDatabase, "cache_table": "create_personal_space", "cache_ttl": cfg.Cache.CreateHomeCacheTTL, - "cache_size": cfg.Cache.CreateHomeCacheSize, "cache_disable_persistence": cfg.Cache.CreateHomeCacheDisablePersistence, "cache_auth_username": cfg.Cache.CreateHomeCacheAuthUsername, "cache_auth_password": cfg.Cache.CreateHomeCacheAuthPassword, diff --git a/services/graph/README.md b/services/graph/README.md index cfd7c288425..f5b0d032704 100644 --- a/services/graph/README.md +++ b/services/graph/README.md @@ -66,10 +66,6 @@ The `graph` service can use a configured store via `GRAPH_CACHE_STORE`. Possible - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. 
Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. diff --git a/services/graph/pkg/config/cache.go b/services/graph/pkg/config/cache.go index a8c25667f9d..ca7a103a8dc 100644 --- a/services/graph/pkg/config/cache.go +++ b/services/graph/pkg/config/cache.go @@ -5,11 +5,10 @@ import "time" // Cache defines the available configuration for a cache store type Cache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE;GRAPH_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;GRAPH_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;GRAPH_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"GRAPH_CACHE_STORE_DATABASE" desc:"The database name the configured store should use." 
introductionVersion:"pre5.0"` Table string `yaml:"table" env:"GRAPH_CACHE_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;GRAPH_CACHE_TTL" desc:"Time to live for cache records in the graph. Defaults to '336h' (2 weeks). See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE;GRAPH_CACHE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not explicitly set as default." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;GRAPH_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` AuthUsername string `yaml:"username" env:"OCIS_CACHE_AUTH_USERNAME;GRAPH_CACHE_AUTH_USERNAME" desc:"The username to authenticate with the cache. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_CACHE_AUTH_PASSWORD;GRAPH_CACHE_AUTH_PASSWORD" desc:"The password to authenticate with the cache. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"5.0"` diff --git a/services/graph/pkg/config/defaults/defaultconfig.go b/services/graph/pkg/config/defaults/defaultconfig.go index 5c3fef78216..4cd0680b0d8 100644 --- a/services/graph/pkg/config/defaults/defaultconfig.go +++ b/services/graph/pkg/config/defaults/defaultconfig.go @@ -155,7 +155,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.Cache = &config.Cache{ Store: cfg.Commons.Cache.Store, Nodes: cfg.Commons.Cache.Nodes, - Size: cfg.Commons.Cache.Size, } } else if cfg.Cache == nil { cfg.Cache = &config.Cache{} diff --git a/services/graph/pkg/service/v0/service.go b/services/graph/pkg/service/v0/service.go index 1209d42c813..03cc754efd9 100644 --- a/services/graph/pkg/service/v0/service.go +++ b/services/graph/pkg/service/v0/service.go @@ -182,7 +182,6 @@ func NewService(opts ...Option) (Graph, error) { //nolint:maintidx storeOptions := []microstore.Option{ store.Store(options.Config.Cache.Store), store.TTL(options.Config.Cache.TTL), - store.Size(options.Config.Cache.Size), microstore.Nodes(options.Config.Cache.Nodes...), microstore.Database(options.Config.Cache.Database), microstore.Table(options.Config.Cache.Table), diff --git a/services/ocs/README.md b/services/ocs/README.md index dd7a83f6d60..801fd9c536b 100644 --- a/services/ocs/README.md +++ b/services/ocs/README.md @@ -14,7 +14,7 @@ Possible stores that can be configured via `OCS_PRESIGNEDURL_SIGNING_KEYS_STORE` - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `ocisstoreservice`: Stores data in the legacy ocis store service. Requires setting `OCS_PRESIGNEDURL_SIGNING_KEYS_STORE_NODES` to `com.owncloud.api.store`. -The `memory` or `ocmem` stores cannot be used as they do not share the memory from the ocs service signing key memory store, even in a single process. +The `memory` store cannot be used as it does not share the memory from the ocs service signing key memory store, even in a single process. 
Make sure to configure the same store in the proxy service. diff --git a/services/postprocessing/README.md b/services/postprocessing/README.md index 305d669c241..687c6c02bf8 100644 --- a/services/postprocessing/README.md +++ b/services/postprocessing/README.md @@ -23,10 +23,6 @@ The `postprocessing` service stores its metadata via the configured store in `PO - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. 
diff --git a/services/postprocessing/pkg/command/server.go b/services/postprocessing/pkg/command/server.go index 8527b71dc7b..3b633ee7b14 100644 --- a/services/postprocessing/pkg/command/server.go +++ b/services/postprocessing/pkg/command/server.go @@ -55,7 +55,6 @@ func Server(cfg *config.Config) *cli.Command { st := store.Create( store.Store(cfg.Store.Store), store.TTL(cfg.Store.TTL), - store.Size(cfg.Store.Size), microstore.Nodes(cfg.Store.Nodes...), microstore.Database(cfg.Store.Database), microstore.Table(cfg.Store.Table), diff --git a/services/postprocessing/pkg/config/config.go b/services/postprocessing/pkg/config/config.go index 84ff69acdcc..70f0723702d 100644 --- a/services/postprocessing/pkg/config/config.go +++ b/services/postprocessing/pkg/config/config.go @@ -55,12 +55,11 @@ type Debug struct { // Store configures the store to use type Store struct { - Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;POSTPROCESSING_STORE" desc:"The type of the store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;POSTPROCESSING_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;POSTPROCESSING_STORE" desc:"The type of the store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;POSTPROCESSING_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. 
Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"POSTPROCESSING_STORE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` Table string `yaml:"table" env:"POSTPROCESSING_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_PERSISTENT_STORE_TTL;POSTPROCESSING_STORE_TTL" desc:"Time to live for events in the store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_PERSISTENT_STORE_SIZE;POSTPROCESSING_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not exclicitly set as default." introductionVersion:"pre5.0"` AuthUsername string `yaml:"username" env:"OCIS_PERSISTENT_STORE_AUTH_USERNAME;POSTPROCESSING_STORE_AUTH_USERNAME" desc:"The username to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_PERSISTENT_STORE_AUTH_PASSWORD;POSTPROCESSING_STORE_AUTH_PASSWORD" desc:"The password to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` } diff --git a/services/proxy/README.md b/services/proxy/README.md index de6337b2e27..404e5113dba 100644 --- a/services/proxy/README.md +++ b/services/proxy/README.md @@ -214,10 +214,6 @@ The `proxy` service can use a configured store via `PROXY_OIDC_USERINFO_CACHE_ST - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. 
- `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. @@ -238,7 +234,7 @@ To authenticate presigned URLs the proxy service needs to read signing keys from - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `ocisstoreservice`: Stores data in the legacy ocis store service. Requires setting `PROXY_PRESIGNEDURL_SIGNING_KEYS_STORE_NODES` to `com.owncloud.api.store`. -The `memory` or `ocmem` stores cannot be used as they do not share the memory from the ocs service signing key memory store, even in a single process. +The `memory` store cannot be used as it does not share the memory from the ocs service signing key memory store, even in a single process. Make sure to configure the same store in the ocs service. 
diff --git a/services/proxy/pkg/command/server.go b/services/proxy/pkg/command/server.go index 7dd1673be86..fb6af95e7d0 100644 --- a/services/proxy/pkg/command/server.go +++ b/services/proxy/pkg/command/server.go @@ -57,7 +57,6 @@ func Server(cfg *config.Config) *cli.Command { userInfoCache := store.Create( store.Store(cfg.OIDC.UserinfoCache.Store), store.TTL(cfg.OIDC.UserinfoCache.TTL), - store.Size(cfg.OIDC.UserinfoCache.Size), microstore.Nodes(cfg.OIDC.UserinfoCache.Nodes...), microstore.Database(cfg.OIDC.UserinfoCache.Database), microstore.Table(cfg.OIDC.UserinfoCache.Table), diff --git a/services/proxy/pkg/config/config.go b/services/proxy/pkg/config/config.go index 4f9c67a6e7f..21a54d08cac 100644 --- a/services/proxy/pkg/config/config.go +++ b/services/proxy/pkg/config/config.go @@ -128,11 +128,10 @@ type JWKS struct { // Cache is a TTL cache configuration. type Cache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE;PROXY_OIDC_USERINFO_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"addresses" env:"OCIS_CACHE_STORE_NODES;PROXY_OIDC_USERINFO_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"addresses" env:"OCIS_CACHE_STORE_NODES;PROXY_OIDC_USERINFO_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." 
introductionVersion:"pre5.0"` Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` Table string `yaml:"table" env:"PROXY_OIDC_USERINFO_CACHE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;PROXY_OIDC_USERINFO_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE;PROXY_OIDC_USERINFO_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not explicitly set as default." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;PROXY_OIDC_USERINFO_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"pre5.0"` AuthUsername string `yaml:"username" env:"OCIS_CACHE_AUTH_USERNAME;PROXY_OIDC_USERINFO_CACHE_AUTH_USERNAME" desc:"The username to authenticate with the cache. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_CACHE_AUTH_PASSWORD;PROXY_OIDC_USERINFO_CACHE_AUTH_PASSWORD" desc:"The password to authenticate with the cache. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"5.0"` diff --git a/services/proxy/pkg/config/defaults/defaultconfig.go b/services/proxy/pkg/config/defaults/defaultconfig.go index 07dabe77bf5..aab86ff0615 100644 --- a/services/proxy/pkg/config/defaults/defaultconfig.go +++ b/services/proxy/pkg/config/defaults/defaultconfig.go @@ -303,7 +303,6 @@ func EnsureDefaults(cfg *config.Config) { cfg.OIDC.UserinfoCache = &config.Cache{ Store: cfg.Commons.Cache.Store, Nodes: cfg.Commons.Cache.Nodes, - Size: cfg.Commons.Cache.Size, } } else if cfg.OIDC.UserinfoCache == nil { cfg.OIDC.UserinfoCache = &config.Cache{} diff --git a/services/settings/README.md b/services/settings/README.md index 7b9ef43bf5f..122441ea148 100644 --- a/services/settings/README.md +++ b/services/settings/README.md @@ -39,10 +39,6 @@ The store used for the cache can be configured using the `SETTINGS_CACHE_STORE` - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. 
diff --git a/services/settings/pkg/config/config.go b/services/settings/pkg/config/config.go index a272f927892..3ecbe6f17cd 100644 --- a/services/settings/pkg/config/config.go +++ b/services/settings/pkg/config/config.go @@ -56,12 +56,11 @@ type Metadata struct { // Cache configures the cache of the Metadata store type Cache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE;SETTINGS_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"addresses" env:"OCIS_CACHE_STORE_NODES;SETTINGS_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"addresses" env:"OCIS_CACHE_STORE_NODES;SETTINGS_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` FileTable string `yaml:"files_table" env:"SETTINGS_FILE_CACHE_TABLE" desc:"The database table the store should use for the file cache." introductionVersion:"pre5.0"` DirectoryTable string `yaml:"directories_table" env:"SETTINGS_DIRECTORY_CACHE_TABLE" desc:"The database table the store should use for the directory cache." 
introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;SETTINGS_CACHE_TTL" desc:"Default time to live for entries in the cache. Only applied when access tokens has no expiration. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE;SETTINGS_CACHE_SIZE" desc:"The maximum quantity of items in the cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not exclicitly set as default." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;SETTINGS_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` AuthUsername string `yaml:"username" env:"OCIS_CACHE_AUTH_USERNAME;SETTINGS_CACHE_AUTH_USERNAME" desc:"The username to authenticate with the cache. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_CACHE_AUTH_PASSWORD;SETTINGS_CACHE_AUTH_PASSWORD" desc:"The password to authenticate with the cache. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"5.0"` diff --git a/services/settings/pkg/store/metadata/cache.go b/services/settings/pkg/store/metadata/cache.go index 80ebd02a5d0..aca47f06d27 100644 --- a/services/settings/pkg/store/metadata/cache.go +++ b/services/settings/pkg/store/metadata/cache.go @@ -135,7 +135,6 @@ func (c *CachedMDC) Init(ctx context.Context, id string) error { c.dirsCache = store.Create( store.Store(c.cfg.Metadata.Cache.Store), store.TTL(c.cfg.Metadata.Cache.TTL), - store.Size(c.cfg.Metadata.Cache.Size), microstore.Nodes(c.cfg.Metadata.Cache.Nodes...), microstore.Database(c.cfg.Metadata.Cache.Database), microstore.Table(c.cfg.Metadata.Cache.DirectoryTable), @@ -145,7 +144,6 @@ func (c *CachedMDC) Init(ctx context.Context, id string) error { c.filesCache = store.Create( store.Store(c.cfg.Metadata.Cache.Store), store.TTL(c.cfg.Metadata.Cache.TTL), - store.Size(c.cfg.Metadata.Cache.Size), microstore.Nodes(c.cfg.Metadata.Cache.Nodes...), microstore.Database(c.cfg.Metadata.Cache.Database), microstore.Table(c.cfg.Metadata.Cache.FileTable), diff --git a/services/storage-system/README.md b/services/storage-system/README.md index 352a0d9d014..b0734449b32 100644 --- a/services/storage-system/README.md +++ b/services/storage-system/README.md @@ -9,10 +9,6 @@ The `storage-system` service caches file metadata via the configured store in `S - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. 
(deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. diff --git a/services/storage-system/pkg/config/config.go b/services/storage-system/pkg/config/config.go index ec4c0f898b5..5b3679751b4 100644 --- a/services/storage-system/pkg/config/config.go +++ b/services/storage-system/pkg/config/config.go @@ -88,10 +88,9 @@ type OCISDriver struct { // Cache holds cache config type Cache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_SYSTEM_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_SYSTEM_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_SYSTEM_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;STORAGE_SYSTEM_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. 
See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE;STORAGE_SYSTEM_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not exclicitly set as default." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;STORAGE_SYSTEM_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` AuthUsername string `yaml:"auth_username" env:"OCIS_CACHE_AUTH_USERNAME;STORAGE_SYSTEM_CACHE_AUTH_USERNAME" desc:"Username for the configured store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"auth_password" env:"OCIS_CACHE_AUTH_PASSWORD;STORAGE_SYSTEM_CACHE_AUTH_PASSWORD" desc:"Password for the configured store. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"5.0"` diff --git a/services/storage-system/pkg/revaconfig/config.go b/services/storage-system/pkg/revaconfig/config.go index d0304e05c6f..b7684a21505 100644 --- a/services/storage-system/pkg/revaconfig/config.go +++ b/services/storage-system/pkg/revaconfig/config.go @@ -168,7 +168,6 @@ func metadataDrivers(localEndpoint string, cfg *config.Config) map[string]interf "cache_nodes": cfg.FileMetadataCache.Nodes, "cache_database": cfg.FileMetadataCache.Database, "cache_ttl": cfg.FileMetadataCache.TTL, - "cache_size": cfg.FileMetadataCache.Size, "cache_disable_persistence": cfg.FileMetadataCache.DisablePersistence, "cache_auth_username": cfg.FileMetadataCache.AuthUsername, "cache_auth_password": cfg.FileMetadataCache.AuthPassword, diff --git a/services/storage-users/README.md b/services/storage-users/README.md index b529fb3f679..0f374226098 100644 --- a/services/storage-users/README.md +++ b/services/storage-users/README.md @@ -219,10 +219,6 @@ The `storage-users` service caches stat, metadata and uuids of files and folders - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. 
diff --git a/services/storage-users/pkg/config/config.go b/services/storage-users/pkg/config/config.go index bd272e7444c..7e76559e44c 100644 --- a/services/storage-users/pkg/config/config.go +++ b/services/storage-users/pkg/config/config.go @@ -223,10 +223,9 @@ type Events struct { // FilemetadataCache holds cache config type FilemetadataCache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_USERS_FILEMETADATA_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_FILEMETADATA_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_FILEMETADATA_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;STORAGE_USERS_FILEMETADATA_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. See the Environment Variable Types description for more details." 
introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE;STORAGE_USERS_FILEMETADATA_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not exclicitly set as default." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;STORAGE_USERS_FILEMETADATA_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` AuthUsername string `yaml:"username" env:"OCIS_CACHE_AUTH_USERNAME;STORAGE_USERS_FILEMETADATA_CACHE_AUTH_USERNAME" desc:"The username to authenticate with the cache store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_CACHE_AUTH_PASSWORD;STORAGE_USERS_FILEMETADATA_CACHE_AUTH_PASSWORD" desc:"The password to authenticate with the cache store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` @@ -235,10 +234,9 @@ type FilemetadataCache struct { // IDCache holds cache config type IDCache struct { Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_USERS_ID_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'redis-sentinel', 'nats-js-kv', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_ID_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." 
introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_ID_CACHE_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;STORAGE_USERS_ID_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens have no expiration. Defaults to 300s which is derived from the underlaying package though not explicitly set as default. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_CACHE_SIZE;STORAGE_USERS_ID_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not exclicitly set as default." introductionVersion:"pre5.0"` DisablePersistence bool `yaml:"disable_persistence" env:"OCIS_CACHE_DISABLE_PERSISTENCE;STORAGE_USERS_ID_CACHE_DISABLE_PERSISTENCE" desc:"Disables persistence of the cache. Only applies when store type 'nats-js-kv' is configured. Defaults to false." introductionVersion:"5.0"` AuthUsername string `yaml:"username" env:"OCIS_CACHE_AUTH_USERNAME;STORAGE_USERS_ID_CACHE_AUTH_USERNAME" desc:"The username to authenticate with the cache store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_CACHE_AUTH_PASSWORD;STORAGE_USERS_ID_CACHE_AUTH_PASSWORD" desc:"The password to authenticate with the cache store. 
Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` diff --git a/services/storage-users/pkg/revaconfig/drivers.go b/services/storage-users/pkg/revaconfig/drivers.go index d351da4a6de..311e40c7591 100644 --- a/services/storage-users/pkg/revaconfig/drivers.go +++ b/services/storage-users/pkg/revaconfig/drivers.go @@ -101,7 +101,6 @@ func Posix(cfg *config.Config, enableFSWatch bool) map[string]interface{} { "cache_nodes": cfg.IDCache.Nodes, "cache_database": cfg.IDCache.Database, "cache_ttl": cfg.IDCache.TTL, - "cache_size": cfg.IDCache.Size, "cache_disable_persistence": cfg.IDCache.DisablePersistence, "cache_auth_username": cfg.IDCache.AuthUsername, "cache_auth_password": cfg.IDCache.AuthPassword, @@ -111,7 +110,6 @@ func Posix(cfg *config.Config, enableFSWatch bool) map[string]interface{} { "cache_nodes": cfg.FilemetadataCache.Nodes, "cache_database": cfg.FilemetadataCache.Database, "cache_ttl": cfg.FilemetadataCache.TTL, - "cache_size": cfg.FilemetadataCache.Size, "cache_disable_persistence": cfg.FilemetadataCache.DisablePersistence, "cache_auth_username": cfg.FilemetadataCache.AuthUsername, "cache_auth_password": cfg.FilemetadataCache.AuthPassword, @@ -186,7 +184,6 @@ func Ocis(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.FilemetadataCache.Nodes, "cache_database": cfg.FilemetadataCache.Database, "cache_ttl": cfg.FilemetadataCache.TTL, - "cache_size": cfg.FilemetadataCache.Size, "cache_disable_persistence": cfg.FilemetadataCache.DisablePersistence, "cache_auth_username": cfg.FilemetadataCache.AuthUsername, "cache_auth_password": cfg.FilemetadataCache.AuthPassword, @@ -196,7 +193,6 @@ func Ocis(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.IDCache.Nodes, "cache_database": cfg.IDCache.Database, "cache_ttl": cfg.IDCache.TTL, - "cache_size": cfg.IDCache.Size, "cache_disable_persistence": cfg.IDCache.DisablePersistence, "cache_auth_username": cfg.IDCache.AuthUsername, "cache_auth_password": 
cfg.IDCache.AuthPassword, @@ -242,7 +238,6 @@ func OcisNoEvents(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.FilemetadataCache.Nodes, "cache_database": cfg.FilemetadataCache.Database, "cache_ttl": cfg.FilemetadataCache.TTL, - "cache_size": cfg.FilemetadataCache.Size, "cache_disable_persistence": cfg.FilemetadataCache.DisablePersistence, "cache_auth_username": cfg.FilemetadataCache.AuthUsername, "cache_auth_password": cfg.FilemetadataCache.AuthPassword, @@ -252,7 +247,6 @@ func OcisNoEvents(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.IDCache.Nodes, "cache_database": cfg.IDCache.Database, "cache_ttl": cfg.IDCache.TTL, - "cache_size": cfg.IDCache.Size, "cache_disable_persistence": cfg.IDCache.DisablePersistence, "cache_auth_username": cfg.IDCache.AuthUsername, "cache_auth_password": cfg.IDCache.AuthPassword, @@ -313,7 +307,6 @@ func S3NG(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.FilemetadataCache.Nodes, "cache_database": cfg.FilemetadataCache.Database, "cache_ttl": cfg.FilemetadataCache.TTL, - "cache_size": cfg.FilemetadataCache.Size, "cache_disable_persistence": cfg.FilemetadataCache.DisablePersistence, "cache_auth_username": cfg.FilemetadataCache.AuthUsername, "cache_auth_password": cfg.FilemetadataCache.AuthPassword, @@ -323,7 +316,6 @@ func S3NG(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.IDCache.Nodes, "cache_database": cfg.IDCache.Database, "cache_ttl": cfg.IDCache.TTL, - "cache_size": cfg.IDCache.Size, "cache_disable_persistence": cfg.IDCache.DisablePersistence, "cache_auth_username": cfg.IDCache.AuthUsername, "cache_auth_password": cfg.IDCache.AuthPassword, @@ -373,7 +365,6 @@ func S3NGNoEvents(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.FilemetadataCache.Nodes, "cache_database": cfg.FilemetadataCache.Database, "cache_ttl": cfg.FilemetadataCache.TTL, - "cache_size": cfg.FilemetadataCache.Size, "cache_disable_persistence": 
cfg.FilemetadataCache.DisablePersistence, "cache_auth_username": cfg.FilemetadataCache.AuthUsername, "cache_auth_password": cfg.FilemetadataCache.AuthPassword, @@ -383,7 +374,6 @@ func S3NGNoEvents(cfg *config.Config) map[string]interface{} { "cache_nodes": cfg.IDCache.Nodes, "cache_database": cfg.IDCache.Database, "cache_ttl": cfg.IDCache.TTL, - "cache_size": cfg.IDCache.Size, "cache_disable_persistence": cfg.IDCache.DisablePersistence, "cache_auth_username": cfg.IDCache.AuthUsername, "cache_auth_password": cfg.IDCache.AuthPassword, diff --git a/services/userlog/README.md b/services/userlog/README.md index ef5021b8f1a..15d9c4d2858 100644 --- a/services/userlog/README.md +++ b/services/userlog/README.md @@ -20,10 +20,6 @@ The `userlog` service persists information via the configured store in `USERLOG_ - `redis-sentinel`: Stores data in a configured Redis Sentinel cluster. - `nats-js-kv`: Stores data using key-value-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/key-value-store) - `noop`: Stores nothing. Useful for testing. Not recommended in production environments. - - `ocmem`: Advanced in-memory store allowing max size. (deprecated) - - `redis`: Stores data in a configured Redis cluster. (deprecated) - - `etcd`: Stores data in a configured etcd cluster. (deprecated) - - `nats-js`: Stores data using object-store feature of [nats jetstream](https://docs.nats.io/nats-concepts/jetstream/obj_store) (deprecated) Other store types may work but are not supported currently. 
diff --git a/services/userlog/pkg/command/server.go b/services/userlog/pkg/command/server.go index 566c5f76e7c..b86ff9c3827 100644 --- a/services/userlog/pkg/command/server.go +++ b/services/userlog/pkg/command/server.go @@ -84,7 +84,6 @@ func Server(cfg *config.Config) *cli.Command { st := store.Create( store.Store(cfg.Persistence.Store), store.TTL(cfg.Persistence.TTL), - store.Size(cfg.Persistence.Size), microstore.Nodes(cfg.Persistence.Nodes...), microstore.Database(cfg.Persistence.Database), microstore.Table(cfg.Persistence.Table), diff --git a/services/userlog/pkg/config/config.go b/services/userlog/pkg/config/config.go index bea159cfc4f..b7b6279c85b 100644 --- a/services/userlog/pkg/config/config.go +++ b/services/userlog/pkg/config/config.go @@ -39,12 +39,11 @@ type Config struct { // Persistence configures the store to use type Persistence struct { - Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;USERLOG_STORE" desc:"The type of the store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details." introductionVersion:"pre5.0"` - Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;USERLOG_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` + Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;USERLOG_STORE" desc:"The type of the store. Supported values are: 'memory', 'nats-js-kv', 'redis-sentinel', 'noop'. See the text description for details." introductionVersion:"pre5.0"` + Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;USERLOG_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. 
Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` Database string `yaml:"database" env:"USERLOG_STORE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"pre5.0"` Table string `yaml:"table" env:"USERLOG_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"pre5.0"` TTL time.Duration `yaml:"ttl" env:"OCIS_PERSISTENT_STORE_TTL;USERLOG_STORE_TTL" desc:"Time to live for events in the store. Defaults to '336h' (2 weeks). See the Environment Variable Types description for more details." introductionVersion:"pre5.0"` - Size int `yaml:"size" env:"OCIS_PERSISTENT_STORE_SIZE;USERLOG_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512 which is derived from the ocmem package though not exclicitly set as default." introductionVersion:"pre5.0"` AuthUsername string `yaml:"username" env:"OCIS_PERSISTENT_STORE_AUTH_USERNAME;USERLOG_STORE_AUTH_USERNAME" desc:"The username to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"5.0"` AuthPassword string `yaml:"password" env:"OCIS_PERSISTENT_STORE_AUTH_PASSWORD;USERLOG_STORE_AUTH_PASSWORD" desc:"The password to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"5.0"` } From 3d69694b749f5a5969f407a53efbacff546272e2 Mon Sep 17 00:00:00 2001 From: jkoberg Date: Tue, 15 Oct 2024 16:21:33 +0200 Subject: [PATCH 2/6] feat(ocis): remove deprecated registries Signed-off-by: jkoberg --- ocis-pkg/registry/registry.go | 27 --------------------------- ocis/README.md | 6 +----- services/nats/README.md | 2 -- 3 files changed, 1 insertion(+), 34 deletions(-) diff --git a/ocis-pkg/registry/registry.go b/ocis-pkg/registry/registry.go index bf2c9df1b59..ea3c9371678 100644 --- a/ocis-pkg/registry/registry.go +++ b/ocis-pkg/registry/registry.go @@ -8,12 +8,7 @@ import ( "time" rRegistry "github.com/cs3org/reva/v2/pkg/registry" - consulr "github.com/go-micro/plugins/v4/registry/consul" - etcdr "github.com/go-micro/plugins/v4/registry/etcd" - kubernetesr "github.com/go-micro/plugins/v4/registry/kubernetes" - mdnsr "github.com/go-micro/plugins/v4/registry/mdns" memr "github.com/go-micro/plugins/v4/registry/memory" - natsr "github.com/go-micro/plugins/v4/registry/nats" "github.com/owncloud/ocis/v2/ocis-pkg/natsjsregistry" mRegistry "go-micro.dev/v4/registry" "go-micro.dev/v4/registry/cache" @@ -68,28 +63,6 @@ func GetRegistry(opts ...Option) mRegistry.Registry { case "memory": _reg = memr.NewRegistry() cfg.DisableCache = true // no cache needed for in-memory registry - case "kubernetes": - fmt.Println("Attention: kubernetes registry is deprecated, use nats-js-kv instead") - _reg = kubernetesr.NewRegistry() - case "nats": - fmt.Println("Attention: nats registry is deprecated, use nats-js-kv instead") - _reg = natsr.NewRegistry( - mRegistry.Addrs(cfg.Addresses...), - natsr.RegisterAction("put"), - ) - case "etcd": - fmt.Println("Attention: etcd registry is deprecated, use nats-js-kv instead") - _reg = etcdr.NewRegistry( - mRegistry.Addrs(cfg.Addresses...), - ) - case "consul": - fmt.Println("Attention: consul registry is deprecated, use nats-js-kv instead") - _reg = consulr.NewRegistry( - mRegistry.Addrs(cfg.Addresses...), - ) 
- case "mdns": - fmt.Println("Attention: mdns registry is deprecated, use nats-js-kv instead") - _reg = mdnsr.NewRegistry() } // Disable cache if wanted diff --git a/ocis/README.md b/ocis/README.md index 043071a76f4..5836144bd24 100644 --- a/ocis/README.md +++ b/ocis/README.md @@ -9,13 +9,9 @@ This package also configures the service registry which will be used to look up Available registries are: - nats-js-kv (default) -- kubernetes - memory -- etcd (deprecated) -- consul (deprecated) -- mdns (deprecated) -To configure which registry to use, you have to set the environment variable `MICRO_REGISTRY`, and for all except `memory` and `mdns` you also have to set the registry address via `MICRO_REGISTRY_ADDRESS`. +To configure which registry to use, you have to set the environment variable `MICRO_REGISTRY`, and for all except `memory` you also have to set the registry address via `MICRO_REGISTRY_ADDRESS`. ## Memory limits diff --git a/services/nats/README.md b/services/nats/README.md index 3a9da526cf7..ff16ae63c1b 100644 --- a/services/nats/README.md +++ b/services/nats/README.md @@ -12,8 +12,6 @@ As the service name suggests, this service is based on [NATS](https://nats.io/) By default, `nats-js-kv` is configured as embedded default registry via the `MICRO_REGISTRY` environment variable. If you do not want using the build-in nats registry, set `MICRO_REGISTRY_ADDRESS` to the address of the nats-js cluster, which is the same value as `OCIS_EVENTS_ENDPOINT`. Optionally use `MICRO_REGISTRY_AUTH_USERNAME` and `MICRO_REGISTRY_AUTH_PASSWORD` to authenticate with the external nats cluster. -Note that when `MICRO_REGISTRY` is configured using `kubernetes`, the NATS service will not be used but the Kubernetes registry. In this case, the `MICRO_REGISTRY_ADDRESS` environment variable needs to be set to the url of the Kubernetes registry. 
- ## Persistance To be able to deliver events even after a system or service restart, nats will store events in a folder on the local filesystem. This folder can be specified by setting the `NATS_NATS_STORE_DIR` enviroment variable. If not set, the service will fall back to `$OCIS_BASE_DATA_PATH/nats`. From bf0d8ed3dcf761ec5ddd77c9e2f0495eb6492f57 Mon Sep 17 00:00:00 2001 From: jkoberg Date: Tue, 15 Oct 2024 16:59:46 +0200 Subject: [PATCH 3/6] feat(ocis): remove deprecated envvars Signed-off-by: jkoberg --- services/antivirus/pkg/config/config.go | 7 +++---- services/antivirus/pkg/config/parser/parse.go | 7 ------- services/notifications/pkg/channels/channels.go | 5 ----- services/notifications/pkg/config/config.go | 2 +- services/web/pkg/config/config.go | 7 +++---- services/web/pkg/config/parser/parse.go | 15 --------------- 6 files changed, 7 insertions(+), 36 deletions(-) diff --git a/services/antivirus/pkg/config/config.go b/services/antivirus/pkg/config/config.go index 7ce5b93bae1..fd71d3fd249 100644 --- a/services/antivirus/pkg/config/config.go +++ b/services/antivirus/pkg/config/config.go @@ -73,8 +73,7 @@ type ClamAV struct { // ICAP provides configuration options for icap type ICAP struct { - DeprecatedTimeout int64 `yaml:"timeout" env:"ANTIVIRUS_ICAP_TIMEOUT" desc:"Timeout for the ICAP client." introductionVersion:"pre5.0" deprecationVersion:"5.0" removalVersion:"%%NEXT_PRODUCTION_VERSION%%" deprecationInfo:"Changing the envvar type for consistency reasons." deprecationReplacement:"ANTIVIRUS_ICAP_SCAN_TIMEOUT"` - Timeout time.Duration `yaml:"scan_timeout" env:"ANTIVIRUS_ICAP_SCAN_TIMEOUT" desc:"Scan timeout for the ICAP client. Defaults to '5m' (5 minutes). See the Environment Variable Types description for more details." introductionVersion:"5.0"` - URL string `yaml:"url" env:"ANTIVIRUS_ICAP_URL" desc:"URL of the ICAP server." introductionVersion:"pre5.0"` - Service string `yaml:"service" env:"ANTIVIRUS_ICAP_SERVICE" desc:"The name of the ICAP service." 
introductionVersion:"pre5.0"` + Timeout time.Duration `yaml:"scan_timeout" env:"ANTIVIRUS_ICAP_SCAN_TIMEOUT" desc:"Scan timeout for the ICAP client. Defaults to '5m' (5 minutes). See the Environment Variable Types description for more details." introductionVersion:"5.0"` + URL string `yaml:"url" env:"ANTIVIRUS_ICAP_URL" desc:"URL of the ICAP server." introductionVersion:"pre5.0"` + Service string `yaml:"service" env:"ANTIVIRUS_ICAP_SERVICE" desc:"The name of the ICAP service." introductionVersion:"pre5.0"` } diff --git a/services/antivirus/pkg/config/parser/parse.go b/services/antivirus/pkg/config/parser/parse.go index b762b9eb571..fab4c2f0c0b 100644 --- a/services/antivirus/pkg/config/parser/parse.go +++ b/services/antivirus/pkg/config/parser/parse.go @@ -2,10 +2,8 @@ package parser import ( "errors" - "time" ociscfg "github.com/owncloud/ocis/v2/ocis-pkg/config" - "github.com/owncloud/ocis/v2/ocis-pkg/log" "github.com/owncloud/ocis/v2/services/antivirus/pkg/config" "github.com/owncloud/ocis/v2/services/antivirus/pkg/config/defaults" @@ -36,10 +34,5 @@ func ParseConfig(cfg *config.Config) error { // Validate validates our little config func Validate(cfg *config.Config) error { - if cfg.Scanner.ICAP.DeprecatedTimeout != 0 { - cfg.Scanner.ICAP.Timeout = time.Duration(cfg.Scanner.ICAP.DeprecatedTimeout) * time.Second - log.Deprecation("ANTIVIRUS_ICAP_TIMEOUT is deprecated, use ANTIVIRUS_ICAP_SCAN_TIMEOUT instead") - } - return nil } diff --git a/services/notifications/pkg/channels/channels.go b/services/notifications/pkg/channels/channels.go index c53ac84c64e..89ecb87df98 100644 --- a/services/notifications/pkg/channels/channels.go +++ b/services/notifications/pkg/channels/channels.go @@ -84,14 +84,9 @@ func (m Mail) getMailClient() (*mail.SMTPClient, error) { } switch strings.ToLower(m.conf.Notifications.SMTP.Encryption) { - case "tls": - server.Encryption = mail.EncryptionTLS - server.TLSConfig.ServerName = m.conf.Notifications.SMTP.Host case "starttls": 
server.Encryption = mail.EncryptionSTARTTLS server.TLSConfig.ServerName = m.conf.Notifications.SMTP.Host - case "ssl": - server.Encryption = mail.EncryptionSSL case "ssltls": server.Encryption = mail.EncryptionSSLTLS case "none": diff --git a/services/notifications/pkg/config/config.go b/services/notifications/pkg/config/config.go index 55d7d7e6478..280a6954701 100644 --- a/services/notifications/pkg/config/config.go +++ b/services/notifications/pkg/config/config.go @@ -46,7 +46,7 @@ type SMTP struct { Password string `yaml:"smtp_password" env:"NOTIFICATIONS_SMTP_PASSWORD" desc:"Password for the SMTP host to connect to." introductionVersion:"pre5.0"` Insecure bool `yaml:"insecure" env:"NOTIFICATIONS_SMTP_INSECURE" desc:"Allow insecure connections to the SMTP server." introductionVersion:"pre5.0"` Authentication string `yaml:"smtp_authentication" env:"NOTIFICATIONS_SMTP_AUTHENTICATION" desc:"Authentication method for the SMTP communication. Possible values are 'login', 'plain', 'crammd5', 'none' or 'auto'. If set to 'auto' or unset, the authentication method is automatically negotiated with the server." introductionVersion:"pre5.0"` - Encryption string `yaml:"smtp_encryption" env:"NOTIFICATIONS_SMTP_ENCRYPTION" desc:"Encryption method for the SMTP communication. Possible values are 'starttls', 'ssl', 'ssltls', 'tls' and 'none'." introductionVersion:"pre5.0" deprecationVersion:"5.0.0" removalVersion:"%%NEXT_PRODUCTION_VERSION%%" deprecationInfo:"The NOTIFICATIONS_SMTP_ENCRYPTION values 'ssl' and 'tls' are deprecated and will be removed in the future." deprecationReplacement:"Use 'starttls' instead of 'tls' and 'ssltls' instead of 'ssl'."` + Encryption string `yaml:"smtp_encryption" env:"NOTIFICATIONS_SMTP_ENCRYPTION" desc:"Encryption method for the SMTP communication. Possible values are 'starttls', 'ssltls' and 'none'." introductionVersion:"pre5.0"` } // Events combines the configuration options for the event bus. 
diff --git a/services/web/pkg/config/config.go b/services/web/pkg/config/config.go index 34e5995d0a0..d5d93153570 100644 --- a/services/web/pkg/config/config.go +++ b/services/web/pkg/config/config.go @@ -31,10 +31,9 @@ type Config struct { // Asset defines the available asset configuration. type Asset struct { - DeprecatedPath string `yaml:"path" env:"WEB_ASSET_PATH" desc:"Serve ownCloud Web assets from a path on the filesystem instead of the builtin assets." introductionVersion:"pre5.0" deprecationVersion:"5.1.0" removalVersion:"%%NEXT_PRODUCTION_VERSION%%" deprecationInfo:"The WEB_ASSET_PATH is deprecated and will be removed in the future." deprecationReplacement:"Use WEB_ASSET_CORE_PATH instead."` - CorePath string `yaml:"core_path" env:"WEB_ASSET_CORE_PATH" desc:"Serve ownCloud Web assets from a path on the filesystem instead of the builtin assets. If not defined, the root directory derives from $OCIS_BASE_DATA_PATH/web/assets/core" introductionVersion:"6.0.0"` - ThemesPath string `yaml:"themes_path" env:"OCIS_ASSET_THEMES_PATH;WEB_ASSET_THEMES_PATH" desc:"Serve ownCloud themes from a path on the filesystem instead of the builtin assets. If not defined, the root directory derives from $OCIS_BASE_DATA_PATH/web/assets/themes" introductionVersion:"6.0.0"` - AppsPath string `yaml:"apps_path" env:"WEB_ASSET_APPS_PATH" desc:"Serve ownCloud Web apps assets from a path on the filesystem instead of the builtin assets. If not defined, the root directory derives from $OCIS_BASE_DATA_PATH/web/assets/apps" introductionVersion:"6.0.0"` + CorePath string `yaml:"core_path" env:"WEB_ASSET_CORE_PATH" desc:"Serve ownCloud Web assets from a path on the filesystem instead of the builtin assets. 
If not defined, the root directory derives from $OCIS_BASE_DATA_PATH/web/assets/core" introductionVersion:"6.0.0"` + ThemesPath string `yaml:"themes_path" env:"OCIS_ASSET_THEMES_PATH;WEB_ASSET_THEMES_PATH" desc:"Serve ownCloud themes from a path on the filesystem instead of the builtin assets. If not defined, the root directory derives from $OCIS_BASE_DATA_PATH/web/assets/themes" introductionVersion:"6.0.0"` + AppsPath string `yaml:"apps_path" env:"WEB_ASSET_APPS_PATH" desc:"Serve ownCloud Web apps assets from a path on the filesystem instead of the builtin assets. If not defined, the root directory derives from $OCIS_BASE_DATA_PATH/web/assets/apps" introductionVersion:"6.0.0"` } // CustomStyle references additional css to be loaded into ownCloud Web. diff --git a/services/web/pkg/config/parser/parse.go b/services/web/pkg/config/parser/parse.go index 4789b6aaaca..5a2ffe5d6f6 100644 --- a/services/web/pkg/config/parser/parse.go +++ b/services/web/pkg/config/parser/parse.go @@ -5,7 +5,6 @@ import ( ociscfg "github.com/owncloud/ocis/v2/ocis-pkg/config" "github.com/owncloud/ocis/v2/ocis-pkg/config/envdecode" - "github.com/owncloud/ocis/v2/ocis-pkg/log" "github.com/owncloud/ocis/v2/ocis-pkg/shared" "github.com/owncloud/ocis/v2/services/web/pkg/config" "github.com/owncloud/ocis/v2/services/web/pkg/config/defaults" @@ -45,19 +44,5 @@ func Validate(cfg *config.Config) error { return shared.MissingJWTTokenError(cfg.Service.Name) } - // deprecation: migration requested - // check if the config still uses the deprecated asset path, if so, - // log a warning and copy the value to the setting that is actually used - // this is to ensure a smooth transition from the old to the new core asset path (pre 5.1 to 5.1) - if cfg.Asset.DeprecatedPath != "" { - if cfg.Asset.CorePath == "" { - cfg.Asset.CorePath = cfg.Asset.DeprecatedPath - } - - // message should be logged to the console, - // do not use a logger here because the message MUST be visible independent of the log level - 
log.Deprecation("WEB_ASSET_PATH is deprecated and will be removed in the future. Use WEB_ASSET_CORE_PATH instead.") - } - return nil } From 6fec0fa2bd281db71a00a80d1181ad4bd81e56b3 Mon Sep 17 00:00:00 2001 From: jkoberg Date: Tue, 15 Oct 2024 16:22:49 +0200 Subject: [PATCH 4/6] feat(vendor): clean gomod Signed-off-by: jkoberg --- go.mod | 15 - go.sum | 73 - vendor/github.com/armon/go-metrics/.gitignore | 26 - .../github.com/armon/go-metrics/.travis.yml | 13 - vendor/github.com/armon/go-metrics/LICENSE | 20 - vendor/github.com/armon/go-metrics/README.md | 91 - .../github.com/armon/go-metrics/const_unix.go | 12 - .../armon/go-metrics/const_windows.go | 13 - vendor/github.com/armon/go-metrics/inmem.go | 339 ---- .../armon/go-metrics/inmem_endpoint.go | 162 -- .../armon/go-metrics/inmem_signal.go | 117 -- vendor/github.com/armon/go-metrics/metrics.go | 299 --- vendor/github.com/armon/go-metrics/sink.go | 132 -- vendor/github.com/armon/go-metrics/start.go | 158 -- vendor/github.com/armon/go-metrics/statsd.go | 184 -- .../github.com/armon/go-metrics/statsite.go | 172 -- .../plugins/v4/registry/consul/LICENSE | 191 -- .../plugins/v4/registry/consul/consul.go | 465 ----- .../plugins/v4/registry/consul/encoding.go | 171 -- .../plugins/v4/registry/consul/options.go | 101 - .../plugins/v4/registry/consul/watcher.go | 299 --- .../go-micro/plugins/v4/registry/etcd/LICENSE | 191 -- .../go-micro/plugins/v4/registry/etcd/etcd.go | 423 ----- .../plugins/v4/registry/etcd/options.go | 37 - .../plugins/v4/registry/etcd/watcher.go | 91 - .../plugins/v4/registry/kubernetes/LICENSE | 191 -- .../plugins/v4/registry/kubernetes/README.md | 66 - .../registry/kubernetes/client/api/request.go | 220 --- .../kubernetes/client/api/response.go | 90 - .../v4/registry/kubernetes/client/client.go | 142 -- .../registry/kubernetes/client/kubernetes.go | 35 - .../v4/registry/kubernetes/client/utils.go | 87 - .../registry/kubernetes/client/watch/body.go | 109 -- 
.../registry/kubernetes/client/watch/watch.go | 27 - .../v4/registry/kubernetes/kubernetes.go | 321 ---- .../plugins/v4/registry/kubernetes/watcher.go | 267 --- .../go-micro/plugins/v4/registry/mdns/LICENSE | 191 -- .../go-micro/plugins/v4/registry/mdns/mdns.go | 16 - .../go-micro/plugins/v4/registry/nats/LICENSE | 191 -- .../go-micro/plugins/v4/registry/nats/nats.go | 422 ----- .../plugins/v4/registry/nats/options.go | 87 - .../go-micro/plugins/v4/registry/nats/util.go | 109 -- .../plugins/v4/registry/nats/watcher.go | 39 - .../github.com/hashicorp/consul/api/LICENSE | 356 ---- .../github.com/hashicorp/consul/api/README.md | 77 - vendor/github.com/hashicorp/consul/api/acl.go | 1625 ----------------- .../github.com/hashicorp/consul/api/agent.go | 1428 --------------- vendor/github.com/hashicorp/consul/api/api.go | 1277 ------------- .../hashicorp/consul/api/catalog.go | 377 ---- .../hashicorp/consul/api/config_entry.go | 644 ------- .../consul/api/config_entry_discoverychain.go | 370 ---- .../consul/api/config_entry_exports.go | 82 - .../consul/api/config_entry_gateways.go | 304 --- .../api/config_entry_inline_certificate.go | 46 - .../consul/api/config_entry_intentions.go | 100 - .../consul/api/config_entry_jwt_provider.go | 310 ---- .../hashicorp/consul/api/config_entry_mesh.go | 90 - .../consul/api/config_entry_rate_limit_ip.go | 91 - .../consul/api/config_entry_routes.go | 245 --- .../consul/api/config_entry_sameness_group.go | 29 - .../consul/api/config_entry_status.go | 339 ---- .../hashicorp/consul/api/connect.go | 18 - .../hashicorp/consul/api/connect_ca.go | 201 -- .../hashicorp/consul/api/connect_intention.go | 461 ----- .../hashicorp/consul/api/coordinate.go | 122 -- .../github.com/hashicorp/consul/api/debug.go | 141 -- .../hashicorp/consul/api/discovery_chain.go | 283 --- .../github.com/hashicorp/consul/api/event.go | 114 -- .../github.com/hashicorp/consul/api/health.go | 398 ---- .../hashicorp/consul/api/internal.go | 64 - 
vendor/github.com/hashicorp/consul/api/kv.go | 307 ---- .../github.com/hashicorp/consul/api/lock.go | 411 ----- .../hashicorp/consul/api/namespace.go | 227 --- .../hashicorp/consul/api/operator.go | 14 - .../hashicorp/consul/api/operator_area.go | 209 --- .../hashicorp/consul/api/operator_audit.go | 40 - .../consul/api/operator_autopilot.go | 404 ---- .../hashicorp/consul/api/operator_keyring.go | 110 -- .../hashicorp/consul/api/operator_license.go | 134 -- .../hashicorp/consul/api/operator_raft.go | 127 -- .../hashicorp/consul/api/operator_segment.go | 14 - .../hashicorp/consul/api/operator_usage.go | 57 - .../hashicorp/consul/api/partition.go | 167 -- .../hashicorp/consul/api/peering.go | 295 --- .../hashicorp/consul/api/prepared_query.go | 269 --- vendor/github.com/hashicorp/consul/api/raw.go | 27 - .../hashicorp/consul/api/semaphore.go | 533 ------ .../hashicorp/consul/api/session.go | 246 --- .../hashicorp/consul/api/snapshot.go | 57 - .../github.com/hashicorp/consul/api/status.go | 70 - vendor/github.com/hashicorp/consul/api/txn.go | 249 --- .../hashicorp/consul/api/watch/funcs.go | 351 ---- .../hashicorp/consul/api/watch/plan.go | 258 --- .../hashicorp/consul/api/watch/watch.go | 296 --- .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 ---- .../hashicorp/go-cleanhttp/README.md | 30 - .../hashicorp/go-cleanhttp/cleanhttp.go | 58 - .../github.com/hashicorp/go-cleanhttp/doc.go | 20 - .../hashicorp/go-cleanhttp/handlers.go | 48 - .../hashicorp/go-immutable-radix/.gitignore | 24 - .../hashicorp/go-immutable-radix/CHANGELOG.md | 23 - .../hashicorp/go-immutable-radix/LICENSE | 363 ---- .../hashicorp/go-immutable-radix/README.md | 66 - .../hashicorp/go-immutable-radix/edges.go | 21 - .../hashicorp/go-immutable-radix/iradix.go | 676 ------- .../hashicorp/go-immutable-radix/iter.go | 205 --- .../hashicorp/go-immutable-radix/node.go | 334 ---- .../hashicorp/go-immutable-radix/raw_iter.go | 78 - .../go-immutable-radix/reverse_iter.go | 239 --- 
.../hashicorp/go-rootcerts/.travis.yml | 12 - .../github.com/hashicorp/go-rootcerts/LICENSE | 363 ---- .../hashicorp/go-rootcerts/Makefile | 8 - .../hashicorp/go-rootcerts/README.md | 44 - .../github.com/hashicorp/go-rootcerts/doc.go | 9 - .../hashicorp/go-rootcerts/rootcerts.go | 123 -- .../hashicorp/go-rootcerts/rootcerts_base.go | 12 - .../go-rootcerts/rootcerts_darwin.go | 48 - .../github.com/hashicorp/golang-lru/LICENSE | 364 ---- .../hashicorp/golang-lru/simplelru/lru.go | 177 -- .../golang-lru/simplelru/lru_interface.go | 40 - vendor/github.com/hashicorp/serf/LICENSE | 354 ---- .../hashicorp/serf/coordinate/client.go | 243 --- .../hashicorp/serf/coordinate/config.go | 77 - .../hashicorp/serf/coordinate/coordinate.go | 203 -- .../hashicorp/serf/coordinate/phantom.go | 187 -- .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../github.com/mitchellh/go-homedir/README.md | 14 - .../mitchellh/go-homedir/homedir.go | 167 -- .../mitchellh/hashstructure/LICENSE | 21 - .../mitchellh/hashstructure/README.md | 67 - .../mitchellh/hashstructure/hashstructure.go | 422 ----- .../mitchellh/hashstructure/include.go | 22 - vendor/modules.txt | 48 - 133 files changed, 26463 deletions(-) delete mode 100644 vendor/github.com/armon/go-metrics/.gitignore delete mode 100644 vendor/github.com/armon/go-metrics/.travis.yml delete mode 100644 vendor/github.com/armon/go-metrics/LICENSE delete mode 100644 vendor/github.com/armon/go-metrics/README.md delete mode 100644 vendor/github.com/armon/go-metrics/const_unix.go delete mode 100644 vendor/github.com/armon/go-metrics/const_windows.go delete mode 100644 vendor/github.com/armon/go-metrics/inmem.go delete mode 100644 vendor/github.com/armon/go-metrics/inmem_endpoint.go delete mode 100644 vendor/github.com/armon/go-metrics/inmem_signal.go delete mode 100644 vendor/github.com/armon/go-metrics/metrics.go delete mode 100644 vendor/github.com/armon/go-metrics/sink.go delete mode 100644 vendor/github.com/armon/go-metrics/start.go delete mode 
100644 vendor/github.com/armon/go-metrics/statsd.go delete mode 100644 vendor/github.com/armon/go-metrics/statsite.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/consul/LICENSE delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/consul/consul.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/consul/encoding.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/consul/options.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/consul/watcher.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/etcd/LICENSE delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/etcd/etcd.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/etcd/options.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/etcd/watcher.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/LICENSE delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/README.md delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/request.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/response.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/client.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/kubernetes.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/utils.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/body.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/watch.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/kubernetes.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/kubernetes/watcher.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/mdns/LICENSE delete mode 100644 
vendor/github.com/go-micro/plugins/v4/registry/mdns/mdns.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/nats/LICENSE delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/nats/nats.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/nats/options.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/nats/util.go delete mode 100644 vendor/github.com/go-micro/plugins/v4/registry/nats/watcher.go delete mode 100644 vendor/github.com/hashicorp/consul/api/LICENSE delete mode 100644 vendor/github.com/hashicorp/consul/api/README.md delete mode 100644 vendor/github.com/hashicorp/consul/api/acl.go delete mode 100644 vendor/github.com/hashicorp/consul/api/agent.go delete mode 100644 vendor/github.com/hashicorp/consul/api/api.go delete mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_exports.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_gateways.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_intentions.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_mesh.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_routes.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go delete mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_status.go delete mode 100644 vendor/github.com/hashicorp/consul/api/connect.go delete mode 100644 
vendor/github.com/hashicorp/consul/api/connect_ca.go delete mode 100644 vendor/github.com/hashicorp/consul/api/connect_intention.go delete mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go delete mode 100644 vendor/github.com/hashicorp/consul/api/debug.go delete mode 100644 vendor/github.com/hashicorp/consul/api/discovery_chain.go delete mode 100644 vendor/github.com/hashicorp/consul/api/event.go delete mode 100644 vendor/github.com/hashicorp/consul/api/health.go delete mode 100644 vendor/github.com/hashicorp/consul/api/internal.go delete mode 100644 vendor/github.com/hashicorp/consul/api/kv.go delete mode 100644 vendor/github.com/hashicorp/consul/api/lock.go delete mode 100644 vendor/github.com/hashicorp/consul/api/namespace.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_audit.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_license.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_segment.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_usage.go delete mode 100644 vendor/github.com/hashicorp/consul/api/partition.go delete mode 100644 vendor/github.com/hashicorp/consul/api/peering.go delete mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go delete mode 100644 vendor/github.com/hashicorp/consul/api/raw.go delete mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go delete mode 100644 vendor/github.com/hashicorp/consul/api/session.go delete mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go delete mode 100644 vendor/github.com/hashicorp/consul/api/status.go 
delete mode 100644 vendor/github.com/hashicorp/consul/api/txn.go delete mode 100644 vendor/github.com/hashicorp/consul/api/watch/funcs.go delete mode 100644 vendor/github.com/hashicorp/consul/api/watch/plan.go delete mode 100644 vendor/github.com/hashicorp/consul/api/watch/watch.go delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iradix.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/node.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go delete mode 100644 
vendor/github.com/hashicorp/golang-lru/LICENSE delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go delete mode 100644 vendor/github.com/hashicorp/serf/LICENSE delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go delete mode 100644 vendor/github.com/mitchellh/hashstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/hashstructure/README.md delete mode 100644 vendor/github.com/mitchellh/hashstructure/hashstructure.go delete mode 100644 vendor/github.com/mitchellh/hashstructure/include.go diff --git a/go.mod b/go.mod index 43a04934dc9..af98b0d6a0e 100644 --- a/go.mod +++ b/go.mod @@ -27,12 +27,7 @@ require ( github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 github.com/go-micro/plugins/v4/client/grpc v1.2.1 github.com/go-micro/plugins/v4/logger/zerolog v1.2.0 - github.com/go-micro/plugins/v4/registry/consul v1.2.1 - github.com/go-micro/plugins/v4/registry/etcd v1.2.0 - github.com/go-micro/plugins/v4/registry/kubernetes v1.1.2 - github.com/go-micro/plugins/v4/registry/mdns v1.2.0 github.com/go-micro/plugins/v4/registry/memory v1.2.0 - github.com/go-micro/plugins/v4/registry/nats v1.2.2 github.com/go-micro/plugins/v4/server/grpc v1.2.0 github.com/go-micro/plugins/v4/server/http v1.2.2 github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20231226212146-94a49ba3e06e @@ -132,7 +127,6 @@ require ( github.com/ajg/form v1.5.1 // indirect github.com/alexedwards/argon2id v1.0.0 // indirect 
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect - github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aws/aws-sdk-go v1.45.1 // indirect @@ -235,15 +229,8 @@ require ( github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/schema v1.4.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/hashicorp/consul/api v1.25.1 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack v1.1.5 // indirect github.com/hashicorp/go-plugin v1.6.1 // indirect - github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect - github.com/hashicorp/serf v0.10.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect @@ -275,9 +262,7 @@ require ( github.com/minio/minio-go/v7 v7.0.66 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/go.sum b/go.sum index ad7677497f5..caac38209c5 100644 --- a/go.sum +++ b/go.sum @@ -64,7 +64,6 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CiscoM31/godata v1.0.10 
h1:DZdJ6M8QNh4HquvDDOqNLu6h77Wl86KGK7Qlbmb90sk= github.com/CiscoM31/godata v1.0.10/go.mod h1:ZMiT6JuD3Rm83HEtiTx4JEChsd25YCrxchKGag/sdTc= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c h1:ocsNvQ2tNHme4v/lTs17HROamc7mFzZfzWcg4m+UXN0= github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= @@ -119,8 +118,6 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -221,8 +218,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= 
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= @@ -315,7 +310,6 @@ github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0 github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exoscale/egoscale v0.46.0/go.mod h1:mpEXBpROAa/2i5GC0r33rfxG+TxSEka11g1PIXt9+zc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= @@ -402,18 +396,8 @@ github.com/go-micro/plugins/v4/events/natsjs v1.2.2-0.20231215124540-f7f8d3274bf github.com/go-micro/plugins/v4/events/natsjs v1.2.2-0.20231215124540-f7f8d3274bf9/go.mod h1:cL0O63th39fZ+M/aRJvajz7Qnmv+UTXugOq1k3qrYiQ= github.com/go-micro/plugins/v4/logger/zerolog v1.2.0 h1:JZ516VQ9zekRoi868XG7x0EWxZ2AMq/euHIBChITsTI= github.com/go-micro/plugins/v4/logger/zerolog v1.2.0/go.mod h1:AieYOIeOxobYa5B8WGEqxXM3Ndi26tDIu9fZ4RYkCvQ= -github.com/go-micro/plugins/v4/registry/consul v1.2.1 h1:3wctYMtstwQLCjoJ1HA6mKGGFF1hcdKDv5MzHakB1jE= -github.com/go-micro/plugins/v4/registry/consul v1.2.1/go.mod h1:wTat7/K9XQ+i64VbbcMYFcEwipYfSgJM51HcA/sgsM4= -github.com/go-micro/plugins/v4/registry/etcd v1.2.0 h1:tcHlU1GzvX3oZa8WQH8ylMCGie5qD5g98YWTESJjeqQ= -github.com/go-micro/plugins/v4/registry/etcd v1.2.0/go.mod h1:CQeTHkjN3xMtIQsynaTTquMz2sHEdsTfRIfFzrX7aug= -github.com/go-micro/plugins/v4/registry/kubernetes 
v1.1.2 h1:ZLDgMhpnqj7RjDphedrIqCbmCL2z8m7+8Ex5tdT8GxU= -github.com/go-micro/plugins/v4/registry/kubernetes v1.1.2/go.mod h1:u78+qWLUq8jxu/CF4UW+1UUtNgBz67x27ar2kV5Dd/o= -github.com/go-micro/plugins/v4/registry/mdns v1.2.0 h1:BsGnco+PgycvSX+HS0XbeUQEPoPT3a+dDiHWV6dbVDs= -github.com/go-micro/plugins/v4/registry/mdns v1.2.0/go.mod h1:re0JvO5F56n59WEDaAKj2jtboKa2dklAd6iWyz5xa54= github.com/go-micro/plugins/v4/registry/memory v1.2.0 h1:R0G2tltffuG+fQnk+/JuAdgEJX4J+LuOafZDoNd8ow0= github.com/go-micro/plugins/v4/registry/memory v1.2.0/go.mod h1:4t5YiXJT5BVtMWImxy807lY3ywjv/PHpdHnN+LXSsI4= -github.com/go-micro/plugins/v4/registry/nats v1.2.2 h1:+M1ZzEA77CXJBvhPb71Q8+dZ5vZPkpDTvqWzzwpWSS0= -github.com/go-micro/plugins/v4/registry/nats v1.2.2/go.mod h1:RDsrDhcjJggCzAvvUzo/Bzy68d9s9+tu0KOfofXVCog= github.com/go-micro/plugins/v4/server/grpc v1.2.0 h1:lXfM+/0oE/u1g0hVBYsvbP4lYOYXYOmwf5qH7ghi7Cc= github.com/go-micro/plugins/v4/server/grpc v1.2.0/go.mod h1:+Ah9Pf/vMSXxBM3fup/hc3N+zN2as3nIpcRaR4sBjnY= github.com/go-micro/plugins/v4/server/http v1.2.2 h1:UK2/09AU0zV3wHELuR72TZzVU2vTUhbx9qrRGrQSIWg= @@ -533,8 +517,6 @@ github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -613,64 +595,34 @@ 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjw github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= -github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= 
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= -github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= -github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail 
v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -724,7 +676,6 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -803,7 +754,6 @@ github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxq github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -814,7 +764,6 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= 
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -836,9 +785,7 @@ github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b/go.mod h1:KirJ github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 h1:Z/i1e+gTZrmcGeZyWckaLfucYG6KYOXLWo4co8pZYNY= github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103/go.mod h1:o9YPB5aGP8ob35Vy6+vyq3P3bWe7NQWzf+JLiXCiMaE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.40/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mileusna/useragent v1.3.4 h1:MiuRRuvGjEie1+yZHO88UBYg8YBC/ddF6T7F56i3PCk= @@ -852,19 +799,15 @@ github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNd github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= -github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -947,8 +890,6 @@ github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwU github.com/pablodz/inotifywaitgo v0.0.7 h1:1ii49dGBnRn0t1Sz7RGZS6/NberPEDQprwKHN49Bv6U= github.com/pablodz/inotifywaitgo v0.0.7/go.mod h1:OtzRCsYTJlIr+vAzlOtauTkfQ1c25ebFuXq8tbbf8cw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 
h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= @@ -975,7 +916,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= @@ -987,7 +927,6 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -1009,7 +948,6 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= 
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -1023,7 +961,6 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1066,7 +1003,6 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sacloud/libsacloud v1.36.2/go.mod h1:P7YAOVmnIn3DKHqCZcUKYUXmSwGBm3yS7IBEjKVSrjg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210127161313-bd30bebeac4f/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= @@ -1172,7 
+1108,6 @@ github.com/trustelem/zxcvbn v1.0.1 h1:mp4JFtzdDYGj9WYSD3KQSkwwUumWNFzXaAjckaTYps github.com/trustelem/zxcvbn v1.0.1/go.mod h1:zonUyKeh7sw6psPf/e3DtRqkRyZvAbOfjNz/aO7YQ5s= github.com/tus/tusd/v2 v2.5.0 h1:72/2Ws3kF0upYqENcbb0yr4aca0HByDNkIjOs5yh0es= github.com/tus/tusd/v2 v2.5.0/go.mod h1:dUDNT4TvTMSqsTZvdAokc8e5xsZ+SsxvOCOPoyEeOJQ= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= @@ -1284,7 +1219,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1380,7 +1314,6 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1447,7 +1380,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1481,8 +1413,6 @@ golang.org/x/sys v0.0.0-20201110211018-35f3e6cf4a65/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1497,7 +1427,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1560,7 +1489,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ 
-1568,7 +1496,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore deleted file mode 100644 index e5750f5720e..00000000000 --- a/vendor/github.com/armon/go-metrics/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -/metrics.out - -.idea diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml deleted file mode 100644 index 87d230c8d78..00000000000 --- a/vendor/github.com/armon/go-metrics/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - "1.x" - -env: - - GO111MODULE=on - -install: - - go get ./... - -script: - - go test ./... 
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE deleted file mode 100644 index 106569e542b..00000000000 --- a/vendor/github.com/armon/go-metrics/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md deleted file mode 100644 index aa73348c08d..00000000000 --- a/vendor/github.com/armon/go-metrics/README.md +++ /dev/null @@ -1,91 +0,0 @@ -go-metrics -========== - -This library provides a `metrics` package which can be used to instrument code, -expose application metrics, and profile runtime performance in a flexible manner. 
- -Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) - -Sinks ------ - -The `metrics` package makes use of a `MetricSink` interface to support delivery -to any type of backend. Currently the following sinks are provided: - -* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) -* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) -* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) -* InmemSink : Provides in-memory aggregation, can be used to export stats -* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. -* BlackholeSink : Sinks to nowhere - -In addition to the sinks, the `InmemSignal` can be used to catch a signal, -and dump a formatted output of recent metrics. For example, when a process gets -a SIGUSR1, it can dump to stderr recent performance metrics for debugging. - -Labels ------- - -Most metrics do have an equivalent ending with `WithLabels`, such methods -allow to push metrics with labels and use some features of underlying Sinks -(ex: translated into Prometheus labels). - -Since some of these labels may increase greatly cardinality of metrics, the -library allow to filter labels using a blacklist/whitelist filtering system -which is global to all metrics. - -* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default. -* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks. - -By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that -no tags are filetered at all, but it allow to a user to globally block some tags with high -cardinality at application level. 
- -Examples --------- - -Here is an example of using the package: - -```go -func SlowMethod() { - // Profiling the runtime of a method - defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) -} - -// Configure a statsite sink as the global metrics sink -sink, _ := metrics.NewStatsiteSink("statsite:8125") -metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) - -// Emit a Key/Value pair -metrics.EmitKey([]string{"questions", "meaning of life"}, 42) -``` - -Here is an example of setting up a signal handler: - -```go -// Setup the inmem sink and signal handler -inm := metrics.NewInmemSink(10*time.Second, time.Minute) -sig := metrics.DefaultInmemSignal(inm) -metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) - -// Run some code -inm.SetGauge([]string{"foo"}, 42) -inm.EmitKey([]string{"bar"}, 30) - -inm.IncrCounter([]string{"baz"}, 42) -inm.IncrCounter([]string{"baz"}, 1) -inm.IncrCounter([]string{"baz"}, 80) - -inm.AddSample([]string{"method", "wow"}, 42) -inm.AddSample([]string{"method", "wow"}, 100) -inm.AddSample([]string{"method", "wow"}, 22) - -.... 
-``` - -When a signal comes in, output like the following will be dumped to stderr: - - [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 - [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 - [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 - [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go deleted file mode 100644 index 31098dd57e5..00000000000 --- a/vendor/github.com/armon/go-metrics/const_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - DefaultSignal = syscall.SIGUSR1 -) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go deleted file mode 100644 index 38136af3e42..00000000000 --- a/vendor/github.com/armon/go-metrics/const_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - // Windows has no SIGUSR1, use SIGBREAK - DefaultSignal = syscall.Signal(21) -) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go deleted file mode 100644 index 7c427aca979..00000000000 --- a/vendor/github.com/armon/go-metrics/inmem.go +++ /dev/null @@ -1,339 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "math" - "net/url" - "strings" - "sync" - "time" -) - -var spaceReplacer = strings.NewReplacer(" ", "_") - -// InmemSink provides a MetricSink that does in-memory aggregation -// without sending metrics over a network. It can be embedded within -// an application to provide profiling information. 
-type InmemSink struct { - // How long is each aggregation interval - interval time.Duration - - // Retain controls how many metrics interval we keep - retain time.Duration - - // maxIntervals is the maximum length of intervals. - // It is retain / interval. - maxIntervals int - - // intervals is a slice of the retained intervals - intervals []*IntervalMetrics - intervalLock sync.RWMutex - - rateDenom float64 -} - -// IntervalMetrics stores the aggregated metrics -// for a specific interval -type IntervalMetrics struct { - sync.RWMutex - - // The start time of the interval - Interval time.Time - - // Gauges maps the key to the last set value - Gauges map[string]GaugeValue - - // Points maps the string to the list of emitted values - // from EmitKey - Points map[string][]float32 - - // Counters maps the string key to a sum of the counter - // values - Counters map[string]SampledValue - - // Samples maps the key to an AggregateSample, - // which has the rolled up view of a sample - Samples map[string]SampledValue - - // done is closed when this interval has ended, and a new IntervalMetrics - // has been created to receive any future metrics. 
- done chan struct{} -} - -// NewIntervalMetrics creates a new IntervalMetrics for a given interval -func NewIntervalMetrics(intv time.Time) *IntervalMetrics { - return &IntervalMetrics{ - Interval: intv, - Gauges: make(map[string]GaugeValue), - Points: make(map[string][]float32), - Counters: make(map[string]SampledValue), - Samples: make(map[string]SampledValue), - done: make(chan struct{}), - } -} - -// AggregateSample is used to hold aggregate metrics -// about a sample -type AggregateSample struct { - Count int // The count of emitted pairs - Rate float64 // The values rate per time unit (usually 1 second) - Sum float64 // The sum of values - SumSq float64 `json:"-"` // The sum of squared values - Min float64 // Minimum value - Max float64 // Maximum value - LastUpdated time.Time `json:"-"` // When value was last updated -} - -// Computes a Stddev of the values -func (a *AggregateSample) Stddev() float64 { - num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) - div := float64(a.Count * (a.Count - 1)) - if div == 0 { - return 0 - } - return math.Sqrt(num / div) -} - -// Computes a mean of the values -func (a *AggregateSample) Mean() float64 { - if a.Count == 0 { - return 0 - } - return a.Sum / float64(a.Count) -} - -// Ingest is used to update a sample -func (a *AggregateSample) Ingest(v float64, rateDenom float64) { - a.Count++ - a.Sum += v - a.SumSq += (v * v) - if v < a.Min || a.Count == 1 { - a.Min = v - } - if v > a.Max || a.Count == 1 { - a.Max = v - } - a.Rate = float64(a.Sum) / rateDenom - a.LastUpdated = time.Now() -} - -func (a *AggregateSample) String() string { - if a.Count == 0 { - return "Count: 0" - } else if a.Stddev() == 0 { - return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) - } else { - return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", - a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) - } -} - -// NewInmemSinkFromURL creates 
an InmemSink from a URL. It is used -// (and tested) from NewMetricSinkFromURL. -func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { - params := u.Query() - - interval, err := time.ParseDuration(params.Get("interval")) - if err != nil { - return nil, fmt.Errorf("Bad 'interval' param: %s", err) - } - - retain, err := time.ParseDuration(params.Get("retain")) - if err != nil { - return nil, fmt.Errorf("Bad 'retain' param: %s", err) - } - - return NewInmemSink(interval, retain), nil -} - -// NewInmemSink is used to construct a new in-memory sink. -// Uses an aggregation interval and maximum retention period. -func NewInmemSink(interval, retain time.Duration) *InmemSink { - rateTimeUnit := time.Second - i := &InmemSink{ - interval: interval, - retain: retain, - maxIntervals: int(retain / interval), - rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), - } - i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) - return i -} - -func (i *InmemSink) SetGauge(key []string, val float32) { - i.SetGaugeWithLabels(key, val, nil) -} - -func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - k, name := i.flattenKeyLabels(key, labels) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} -} - -func (i *InmemSink) EmitKey(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - vals := intv.Points[k] - intv.Points[k] = append(vals, val) -} - -func (i *InmemSink) IncrCounter(key []string, val float32) { - i.IncrCounterWithLabels(key, val, nil) -} - -func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - k, name := i.flattenKeyLabels(key, labels) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg, ok := intv.Counters[k] - if !ok { - agg = SampledValue{ - Name: name, - AggregateSample: &AggregateSample{}, - Labels: 
labels, - } - intv.Counters[k] = agg - } - agg.Ingest(float64(val), i.rateDenom) -} - -func (i *InmemSink) AddSample(key []string, val float32) { - i.AddSampleWithLabels(key, val, nil) -} - -func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - k, name := i.flattenKeyLabels(key, labels) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg, ok := intv.Samples[k] - if !ok { - agg = SampledValue{ - Name: name, - AggregateSample: &AggregateSample{}, - Labels: labels, - } - intv.Samples[k] = agg - } - agg.Ingest(float64(val), i.rateDenom) -} - -// Data is used to retrieve all the aggregated metrics -// Intervals may be in use, and a read lock should be acquired -func (i *InmemSink) Data() []*IntervalMetrics { - // Get the current interval, forces creation - i.getInterval() - - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - n := len(i.intervals) - intervals := make([]*IntervalMetrics, n) - - copy(intervals[:n-1], i.intervals[:n-1]) - current := i.intervals[n-1] - - // make its own copy for current interval - intervals[n-1] = &IntervalMetrics{} - copyCurrent := intervals[n-1] - current.RLock() - *copyCurrent = *current - // RWMutex is not safe to copy, so create a new instance on the copy - copyCurrent.RWMutex = sync.RWMutex{} - - copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) - for k, v := range current.Gauges { - copyCurrent.Gauges[k] = v - } - // saved values will be not change, just copy its link - copyCurrent.Points = make(map[string][]float32, len(current.Points)) - for k, v := range current.Points { - copyCurrent.Points[k] = v - } - copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) - for k, v := range current.Counters { - copyCurrent.Counters[k] = v.deepCopy() - } - copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) - for k, v := range current.Samples { - copyCurrent.Samples[k] = v.deepCopy() - } - current.RUnlock() - - return 
intervals -} - -// getInterval returns the current interval. A new interval is created if no -// previous interval exists, or if the current time is beyond the window for the -// current interval. -func (i *InmemSink) getInterval() *IntervalMetrics { - intv := time.Now().Truncate(i.interval) - - // Attempt to return the existing interval first, because it only requires - // a read lock. - i.intervalLock.RLock() - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - defer i.intervalLock.RUnlock() - return i.intervals[n-1] - } - i.intervalLock.RUnlock() - - i.intervalLock.Lock() - defer i.intervalLock.Unlock() - - // Re-check for an existing interval now that the lock is re-acquired. - n = len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - - current := NewIntervalMetrics(intv) - i.intervals = append(i.intervals, current) - if n > 0 { - close(i.intervals[n-1].done) - } - - n++ - // Prune old intervals if the count exceeds the max. 
- if n >= i.maxIntervals { - copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) - i.intervals = i.intervals[:i.maxIntervals] - } - return current -} - -// Flattens the key for formatting, removes spaces -func (i *InmemSink) flattenKey(parts []string) string { - buf := &bytes.Buffer{} - - joined := strings.Join(parts, ".") - - spaceReplacer.WriteString(buf, joined) - - return buf.String() -} - -// Flattens the key for formatting along with its labels, removes spaces -func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { - key := i.flattenKey(parts) - buf := bytes.NewBufferString(key) - - for _, label := range labels { - spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) - } - - return buf.String(), key -} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go deleted file mode 100644 index 24eefa96389..00000000000 --- a/vendor/github.com/armon/go-metrics/inmem_endpoint.go +++ /dev/null @@ -1,162 +0,0 @@ -package metrics - -import ( - "context" - "fmt" - "net/http" - "sort" - "time" -) - -// MetricsSummary holds a roll-up of metrics info for a given interval -type MetricsSummary struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -type GaugeValue struct { - Name string - Hash string `json:"-"` - Value float32 - - Labels []Label `json:"-"` - DisplayLabels map[string]string `json:"Labels"` -} - -type PointValue struct { - Name string - Points []float32 -} - -type SampledValue struct { - Name string - Hash string `json:"-"` - *AggregateSample - Mean float64 - Stddev float64 - - Labels []Label `json:"-"` - DisplayLabels map[string]string `json:"Labels"` -} - -// deepCopy allocates a new instance of AggregateSample -func (source *SampledValue) deepCopy() SampledValue { - dest := *source - if source.AggregateSample != nil { - dest.AggregateSample = &AggregateSample{} - 
*dest.AggregateSample = *source.AggregateSample - } - return dest -} - -// DisplayMetrics returns a summary of the metrics from the most recent finished interval. -func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - data := i.Data() - - var interval *IntervalMetrics - n := len(data) - switch { - case n == 0: - return nil, fmt.Errorf("no metric intervals have been initialized yet") - case n == 1: - // Show the current interval if it's all we have - interval = data[0] - default: - // Show the most recent finished interval if we have one - interval = data[n-2] - } - - return newMetricSummaryFromInterval(interval), nil -} - -func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary { - interval.RLock() - defer interval.RUnlock() - - summary := MetricsSummary{ - Timestamp: interval.Interval.Round(time.Second).UTC().String(), - Gauges: make([]GaugeValue, 0, len(interval.Gauges)), - Points: make([]PointValue, 0, len(interval.Points)), - } - - // Format and sort the output of each metric type, so it gets displayed in a - // deterministic order. 
- for name, points := range interval.Points { - summary.Points = append(summary.Points, PointValue{name, points}) - } - sort.Slice(summary.Points, func(i, j int) bool { - return summary.Points[i].Name < summary.Points[j].Name - }) - - for hash, value := range interval.Gauges { - value.Hash = hash - value.DisplayLabels = make(map[string]string) - for _, label := range value.Labels { - value.DisplayLabels[label.Name] = label.Value - } - value.Labels = nil - - summary.Gauges = append(summary.Gauges, value) - } - sort.Slice(summary.Gauges, func(i, j int) bool { - return summary.Gauges[i].Hash < summary.Gauges[j].Hash - }) - - summary.Counters = formatSamples(interval.Counters) - summary.Samples = formatSamples(interval.Samples) - - return summary -} - -func formatSamples(source map[string]SampledValue) []SampledValue { - output := make([]SampledValue, 0, len(source)) - for hash, sample := range source { - displayLabels := make(map[string]string) - for _, label := range sample.Labels { - displayLabels[label.Name] = label.Value - } - - output = append(output, SampledValue{ - Name: sample.Name, - Hash: hash, - AggregateSample: sample.AggregateSample, - Mean: sample.AggregateSample.Mean(), - Stddev: sample.AggregateSample.Stddev(), - DisplayLabels: displayLabels, - }) - } - sort.Slice(output, func(i, j int) bool { - return output[i].Hash < output[j].Hash - }) - - return output -} - -type Encoder interface { - Encode(interface{}) error -} - -// Stream writes metrics using encoder.Encode each time an interval ends. Runs -// until the request context is cancelled, or the encoder returns an error. -// The caller is responsible for logging any errors from encoder. 
-func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) { - interval := i.getInterval() - - for { - select { - case <-interval.done: - summary := newMetricSummaryFromInterval(interval) - if err := encoder.Encode(summary); err != nil { - return - } - - // update interval to the next one - interval = i.getInterval() - case <-ctx.Done(): - return - } - } -} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go deleted file mode 100644 index 0937f4aedf7..00000000000 --- a/vendor/github.com/armon/go-metrics/inmem_signal.go +++ /dev/null @@ -1,117 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "io" - "os" - "os/signal" - "strings" - "sync" - "syscall" -) - -// InmemSignal is used to listen for a given signal, and when received, -// to dump the current metrics from the InmemSink to an io.Writer -type InmemSignal struct { - signal syscall.Signal - inm *InmemSink - w io.Writer - sigCh chan os.Signal - - stop bool - stopCh chan struct{} - stopLock sync.Mutex -} - -// NewInmemSignal creates a new InmemSignal which listens for a given signal, -// and dumps the current metrics out to a writer -func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { - i := &InmemSignal{ - signal: sig, - inm: inmem, - w: w, - sigCh: make(chan os.Signal, 1), - stopCh: make(chan struct{}), - } - signal.Notify(i.sigCh, sig) - go i.run() - return i -} - -// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 -// and writes output to stderr. 
Windows uses SIGBREAK -func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { - return NewInmemSignal(inmem, DefaultSignal, os.Stderr) -} - -// Stop is used to stop the InmemSignal from listening -func (i *InmemSignal) Stop() { - i.stopLock.Lock() - defer i.stopLock.Unlock() - - if i.stop { - return - } - i.stop = true - close(i.stopCh) - signal.Stop(i.sigCh) -} - -// run is a long running routine that handles signals -func (i *InmemSignal) run() { - for { - select { - case <-i.sigCh: - i.dumpStats() - case <-i.stopCh: - return - } - } -} - -// dumpStats is used to dump the data to output writer -func (i *InmemSignal) dumpStats() { - buf := bytes.NewBuffer(nil) - - data := i.inm.Data() - // Skip the last period which is still being aggregated - for j := 0; j < len(data)-1; j++ { - intv := data[j] - intv.RLock() - for _, val := range intv.Gauges { - name := i.flattenLabels(val.Name, val.Labels) - fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) - } - for name, vals := range intv.Points { - for _, val := range vals { - fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) - } - } - for _, agg := range intv.Counters { - name := i.flattenLabels(agg.Name, agg.Labels) - fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) - } - for _, agg := range intv.Samples { - name := i.flattenLabels(agg.Name, agg.Labels) - fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) - } - intv.RUnlock() - } - - // Write out the bytes - i.w.Write(buf.Bytes()) -} - -// Flattens the key for formatting along with its labels, removes spaces -func (i *InmemSignal) flattenLabels(name string, labels []Label) string { - buf := bytes.NewBufferString(name) - replacer := strings.NewReplacer(" ", "_", ":", "_") - - for _, label := range labels { - replacer.WriteString(buf, ".") - replacer.WriteString(buf, label.Value) - } - - return buf.String() -} diff --git a/vendor/github.com/armon/go-metrics/metrics.go 
b/vendor/github.com/armon/go-metrics/metrics.go deleted file mode 100644 index 36642a42937..00000000000 --- a/vendor/github.com/armon/go-metrics/metrics.go +++ /dev/null @@ -1,299 +0,0 @@ -package metrics - -import ( - "runtime" - "strings" - "time" - - iradix "github.com/hashicorp/go-immutable-radix" -) - -type Label struct { - Name string - Value string -} - -func (m *Metrics) SetGauge(key []string, val float32) { - m.SetGaugeWithLabels(key, val, nil) -} - -func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { - if m.HostName != "" { - if m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } else if m.EnableHostname { - key = insert(0, m.HostName, key) - } - } - if m.EnableTypePrefix { - key = insert(0, "gauge", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - m.sink.SetGaugeWithLabels(key, val, labelsFiltered) -} - -func (m *Metrics) EmitKey(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "kv", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - allowed, _ := m.allowMetric(key, nil) - if !allowed { - return - } - m.sink.EmitKey(key, val) -} - -func (m *Metrics) IncrCounter(key []string, val float32) { - m.IncrCounterWithLabels(key, val, nil) -} - -func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) { - if m.HostName != "" && m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } - if m.EnableTypePrefix { - key = insert(0, "counter", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - 
return - } - m.sink.IncrCounterWithLabels(key, val, labelsFiltered) -} - -func (m *Metrics) AddSample(key []string, val float32) { - m.AddSampleWithLabels(key, val, nil) -} - -func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { - if m.HostName != "" && m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } - if m.EnableTypePrefix { - key = insert(0, "sample", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - m.sink.AddSampleWithLabels(key, val, labelsFiltered) -} - -func (m *Metrics) MeasureSince(key []string, start time.Time) { - m.MeasureSinceWithLabels(key, start, nil) -} - -func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { - if m.HostName != "" && m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } - if m.EnableTypePrefix { - key = insert(0, "timer", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - now := time.Now() - elapsed := now.Sub(start) - msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) - m.sink.AddSampleWithLabels(key, msec, labelsFiltered) -} - -// UpdateFilter overwrites the existing filter with the given rules. -func (m *Metrics) UpdateFilter(allow, block []string) { - m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) -} - -// UpdateFilterAndLabels overwrites the existing filter with the given rules. 
-func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { - m.filterLock.Lock() - defer m.filterLock.Unlock() - - m.AllowedPrefixes = allow - m.BlockedPrefixes = block - - if allowedLabels == nil { - // Having a white list means we take only elements from it - m.allowedLabels = nil - } else { - m.allowedLabels = make(map[string]bool) - for _, v := range allowedLabels { - m.allowedLabels[v] = true - } - } - m.blockedLabels = make(map[string]bool) - for _, v := range blockedLabels { - m.blockedLabels[v] = true - } - m.AllowedLabels = allowedLabels - m.BlockedLabels = blockedLabels - - m.filter = iradix.New() - for _, prefix := range m.AllowedPrefixes { - m.filter, _, _ = m.filter.Insert([]byte(prefix), true) - } - for _, prefix := range m.BlockedPrefixes { - m.filter, _, _ = m.filter.Insert([]byte(prefix), false) - } -} - -func (m *Metrics) Shutdown() { - if ss, ok := m.sink.(ShutdownSink); ok { - ss.Shutdown() - } -} - -// labelIsAllowed return true if a should be included in metric -// the caller should lock m.filterLock while calling this method -func (m *Metrics) labelIsAllowed(label *Label) bool { - labelName := (*label).Name - if m.blockedLabels != nil { - _, ok := m.blockedLabels[labelName] - if ok { - // If present, let's remove this label - return false - } - } - if m.allowedLabels != nil { - _, ok := m.allowedLabels[labelName] - return ok - } - // Allow by default - return true -} - -// filterLabels return only allowed labels -// the caller should lock m.filterLock while calling this method -func (m *Metrics) filterLabels(labels []Label) []Label { - if labels == nil { - return nil - } - toReturn := []Label{} - for _, label := range labels { - if m.labelIsAllowed(&label) { - toReturn = append(toReturn, label) - } - } - return toReturn -} - -// Returns whether the metric should be allowed based on configured prefix filters -// Also return the applicable labels -func (m *Metrics) allowMetric(key []string, labels []Label) 
(bool, []Label) { - m.filterLock.RLock() - defer m.filterLock.RUnlock() - - if m.filter == nil || m.filter.Len() == 0 { - return m.Config.FilterDefault, m.filterLabels(labels) - } - - _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) - if !ok { - return m.Config.FilterDefault, m.filterLabels(labels) - } - - return allowed.(bool), m.filterLabels(labels) -} - -// Periodically collects runtime stats to publish -func (m *Metrics) collectStats() { - for { - time.Sleep(m.ProfileInterval) - m.EmitRuntimeStats() - } -} - -// Emits various runtime statsitics -func (m *Metrics) EmitRuntimeStats() { - // Export number of Goroutines - numRoutines := runtime.NumGoroutine() - m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) - - // Export memory stats - var stats runtime.MemStats - runtime.ReadMemStats(&stats) - m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) - m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) - m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) - m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) - m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) - m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) - m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) - - // Export info about the last few GC runs - num := stats.NumGC - - // Handle wrap around - if num < m.lastNumGC { - m.lastNumGC = 0 - } - - // Ensure we don't scan more than 256 - if num-m.lastNumGC >= 256 { - m.lastNumGC = num - 255 - } - - for i := m.lastNumGC; i < num; i++ { - pause := stats.PauseNs[i%256] - m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) - } - m.lastNumGC = num -} - -// Creates a new slice with the provided string value as the first element -// and the provided slice values as the remaining values. 
-// Ordering of the values in the provided input slice is kept in tact in the output slice. -func insert(i int, v string, s []string) []string { - // Allocate new slice to avoid modifying the input slice - newS := make([]string, len(s)+1) - - // Copy s[0, i-1] into newS - for j := 0; j < i; j++ { - newS[j] = s[j] - } - - // Insert provided element at index i - newS[i] = v - - // Copy s[i, len(s)-1] into newS starting at newS[i+1] - for j := i; j < len(s); j++ { - newS[j+1] = s[j] - } - - return newS -} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go deleted file mode 100644 index 6f4108ff405..00000000000 --- a/vendor/github.com/armon/go-metrics/sink.go +++ /dev/null @@ -1,132 +0,0 @@ -package metrics - -import ( - "fmt" - "net/url" -) - -// The MetricSink interface is used to transmit metrics information -// to an external system -type MetricSink interface { - // A Gauge should retain the last value it is set to - SetGauge(key []string, val float32) - SetGaugeWithLabels(key []string, val float32, labels []Label) - - // Should emit a Key/Value pair for each call - EmitKey(key []string, val float32) - - // Counters should accumulate values - IncrCounter(key []string, val float32) - IncrCounterWithLabels(key []string, val float32, labels []Label) - - // Samples are for timing information, where quantiles are used - AddSample(key []string, val float32) - AddSampleWithLabels(key []string, val float32, labels []Label) -} - -type ShutdownSink interface { - MetricSink - - // Shutdown the metric sink, flush metrics to storage, and cleanup resources. - // Called immediately prior to application exit. Implementations must block - // until metrics are flushed to storage. 
- Shutdown() -} - -// BlackholeSink is used to just blackhole messages -type BlackholeSink struct{} - -func (*BlackholeSink) SetGauge(key []string, val float32) {} -func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} -func (*BlackholeSink) EmitKey(key []string, val float32) {} -func (*BlackholeSink) IncrCounter(key []string, val float32) {} -func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} -func (*BlackholeSink) AddSample(key []string, val float32) {} -func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} - -// FanoutSink is used to sink to fanout values to multiple sinks -type FanoutSink []MetricSink - -func (fh FanoutSink) SetGauge(key []string, val float32) { - fh.SetGaugeWithLabels(key, val, nil) -} - -func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - for _, s := range fh { - s.SetGaugeWithLabels(key, val, labels) - } -} - -func (fh FanoutSink) EmitKey(key []string, val float32) { - for _, s := range fh { - s.EmitKey(key, val) - } -} - -func (fh FanoutSink) IncrCounter(key []string, val float32) { - fh.IncrCounterWithLabels(key, val, nil) -} - -func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - for _, s := range fh { - s.IncrCounterWithLabels(key, val, labels) - } -} - -func (fh FanoutSink) AddSample(key []string, val float32) { - fh.AddSampleWithLabels(key, val, nil) -} - -func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - for _, s := range fh { - s.AddSampleWithLabels(key, val, labels) - } -} - -func (fh FanoutSink) Shutdown() { - for _, s := range fh { - if ss, ok := s.(ShutdownSink); ok { - ss.Shutdown() - } - } -} - -// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided -// by each sink type -type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) - -// sinkRegistry supports the generic 
NewMetricSink function by mapping URL -// schemes to metric sink factory functions -var sinkRegistry = map[string]sinkURLFactoryFunc{ - "statsd": NewStatsdSinkFromURL, - "statsite": NewStatsiteSinkFromURL, - "inmem": NewInmemSinkFromURL, -} - -// NewMetricSinkFromURL allows a generic URL input to configure any of the -// supported sinks. The scheme of the URL identifies the type of the sink, the -// and query parameters are used to set options. -// -// "statsd://" - Initializes a StatsdSink. The host and port are passed through -// as the "addr" of the sink -// -// "statsite://" - Initializes a StatsiteSink. The host and port become the -// "addr" of the sink -// -// "inmem://" - Initializes an InmemSink. The host and port are ignored. The -// "interval" and "duration" query parameters must be specified with valid -// durations, see NewInmemSink for details. -func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - sinkURLFactoryFunc := sinkRegistry[u.Scheme] - if sinkURLFactoryFunc == nil { - return nil, fmt.Errorf( - "cannot create metric sink, unrecognized sink name: %q", u.Scheme) - } - - return sinkURLFactoryFunc(u) -} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go deleted file mode 100644 index 38976f8dc93..00000000000 --- a/vendor/github.com/armon/go-metrics/start.go +++ /dev/null @@ -1,158 +0,0 @@ -package metrics - -import ( - "os" - "sync" - "sync/atomic" - "time" - - iradix "github.com/hashicorp/go-immutable-radix" -) - -// Config is used to configure metrics settings -type Config struct { - ServiceName string // Prefixed with keys to separate services - HostName string // Hostname to use. 
If not provided and EnableHostname, it will be os.Hostname - EnableHostname bool // Enable prefixing gauge values with hostname - EnableHostnameLabel bool // Enable adding hostname to labels - EnableServiceLabel bool // Enable adding service to labels - EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) - EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") - TimerGranularity time.Duration // Granularity of timers. - ProfileInterval time.Duration // Interval to profile runtime metrics - - AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator - BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator - AllowedLabels []string // A list of metric labels to allow, with '.' as the separator - BlockedLabels []string // A list of metric labels to block, with '.' as the separator - FilterDefault bool // Whether to allow metrics by default -} - -// Metrics represents an instance of a metrics sink that can -// be used to emit -type Metrics struct { - Config - lastNumGC uint32 - sink MetricSink - filter *iradix.Tree - allowedLabels map[string]bool - blockedLabels map[string]bool - filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access -} - -// Shared global metrics instance -var globalMetrics atomic.Value // *Metrics - -func init() { - // Initialize to a blackhole sink to avoid errors - globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) -} - -// Default returns the shared global metrics instance. 
-func Default() *Metrics { - return globalMetrics.Load().(*Metrics) -} - -// DefaultConfig provides a sane default configuration -func DefaultConfig(serviceName string) *Config { - c := &Config{ - ServiceName: serviceName, // Use client provided service - HostName: "", - EnableHostname: true, // Enable hostname prefix - EnableRuntimeMetrics: true, // Enable runtime profiling - EnableTypePrefix: false, // Disable type prefix - TimerGranularity: time.Millisecond, // Timers are in milliseconds - ProfileInterval: time.Second, // Poll runtime every second - FilterDefault: true, // Don't filter metrics by default - } - - // Try to get the hostname - name, _ := os.Hostname() - c.HostName = name - return c -} - -// New is used to create a new instance of Metrics -func New(conf *Config, sink MetricSink) (*Metrics, error) { - met := &Metrics{} - met.Config = *conf - met.sink = sink - met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) - - // Start the runtime collector - if conf.EnableRuntimeMetrics { - go met.collectStats() - } - return met, nil -} - -// NewGlobal is the same as New, but it assigns the metrics object to be -// used globally as well as returning it. 
-func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { - metrics, err := New(conf, sink) - if err == nil { - globalMetrics.Store(metrics) - } - return metrics, err -} - -// Proxy all the methods to the globalMetrics instance -func SetGauge(key []string, val float32) { - globalMetrics.Load().(*Metrics).SetGauge(key, val) -} - -func SetGaugeWithLabels(key []string, val float32, labels []Label) { - globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) -} - -func EmitKey(key []string, val float32) { - globalMetrics.Load().(*Metrics).EmitKey(key, val) -} - -func IncrCounter(key []string, val float32) { - globalMetrics.Load().(*Metrics).IncrCounter(key, val) -} - -func IncrCounterWithLabels(key []string, val float32, labels []Label) { - globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) -} - -func AddSample(key []string, val float32) { - globalMetrics.Load().(*Metrics).AddSample(key, val) -} - -func AddSampleWithLabels(key []string, val float32, labels []Label) { - globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) -} - -func MeasureSince(key []string, start time.Time) { - globalMetrics.Load().(*Metrics).MeasureSince(key, start) -} - -func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { - globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) -} - -func UpdateFilter(allow, block []string) { - globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) -} - -// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels -// and blockedLabels - when not nil - allow filtering of labels in order to -// block/allow globally labels (especially useful when having large number of -// values for a given label). See README.md for more information about usage. 
-func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { - globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) -} - -// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage. -// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources. -// This is intended for use immediately prior to application exit. -func Shutdown() { - m := globalMetrics.Load().(*Metrics) - // Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a - // reason to expect that calls to the library will successfully collect metrics after Shutdown - // has been called. - globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) - m.Shutdown() -} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go deleted file mode 100644 index 1bfffce46e2..00000000000 --- a/vendor/github.com/armon/go-metrics/statsd.go +++ /dev/null @@ -1,184 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "log" - "net" - "net/url" - "strings" - "time" -) - -const ( - // statsdMaxLen is the maximum size of a packet - // to send to statsd - statsdMaxLen = 1400 -) - -// StatsdSink provides a MetricSink that can be used -// with a statsite or statsd metrics server. It uses -// only UDP packets, while StatsiteSink uses TCP. -type StatsdSink struct { - addr string - metricQueue chan string -} - -// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used -// (and tested) from NewMetricSinkFromURL. 
-func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { - return NewStatsdSink(u.Host) -} - -// NewStatsdSink is used to create a new StatsdSink -func NewStatsdSink(addr string) (*StatsdSink, error) { - s := &StatsdSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsd -func (s *StatsdSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsdSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsdSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Flattens the key along with labels for formatting, removes spaces -func (s 
*StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { - for _, label := range labels { - parts = append(parts, label.Value) - } - return s.flattenKey(parts) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsdSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsdSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Create a buffer - buf := bytes.NewBuffer(nil) - - // Attempt to connect - sock, err = net.Dial("udp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsd! Err: %s", err) - goto WAIT - } - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Check if this would overflow the packet size - if len(metric)+buf.Len() > statsdMaxLen { - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error writing to statsd! Err: %s", err) - goto WAIT - } - } - - // Append to the buffer - buf.WriteString(metric) - - case <-ticker.C: - if buf.Len() == 0 { - continue - } - - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error flushing to statsd! 
Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go deleted file mode 100644 index 6c0d284d2dd..00000000000 --- a/vendor/github.com/armon/go-metrics/statsite.go +++ /dev/null @@ -1,172 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "net/url" - "strings" - "time" -) - -const ( - // We force flush the statsite metrics after this period of - // inactivity. Prevents stats from getting stuck in a buffer - // forever. - flushInterval = 100 * time.Millisecond -) - -// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used -// (and tested) from NewMetricSinkFromURL. -func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { - return NewStatsiteSink(u.Host) -} - -// StatsiteSink provides a MetricSink that can be used with a -// statsite metrics server -type StatsiteSink struct { - addr string - metricQueue chan string -} - -// NewStatsiteSink is used to create a new StatsiteSink -func NewStatsiteSink(addr string) (*StatsiteSink, error) { - s := &StatsiteSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsite -func (s *StatsiteSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsiteSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) EmitKey(key []string, val float32) 
{ - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsiteSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Flattens the key along with labels for formatting, removes spaces -func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { - for _, label := range labels { - parts = append(parts, label.Value) - } - return s.flattenKey(parts) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsiteSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsiteSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - var buffered *bufio.Writer - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Attempt to connect - sock, err = net.Dial("tcp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsite! 
Err: %s", err) - goto WAIT - } - - // Create a buffered writer - buffered = bufio.NewWriter(sock) - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Try to send to statsite - _, err := buffered.Write([]byte(metric)) - if err != nil { - log.Printf("[ERR] Error writing to statsite! Err: %s", err) - goto WAIT - } - case <-ticker.C: - if err := buffered.Flush(); err != nil { - log.Printf("[ERR] Error flushing to statsite! Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/consul/LICENSE b/vendor/github.com/go-micro/plugins/v4/registry/consul/LICENSE deleted file mode 100644 index 7d74e027ca9..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/consul/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Asim Aslam. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-micro/plugins/v4/registry/consul/consul.go b/vendor/github.com/go-micro/plugins/v4/registry/consul/consul.go deleted file mode 100644 index cdada28f199..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/consul/consul.go +++ /dev/null @@ -1,465 +0,0 @@ -package consul - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/http" - "runtime" - "strconv" - "strings" - "sync" - "time" - - consul "github.com/hashicorp/consul/api" - hash "github.com/mitchellh/hashstructure" - "go-micro.dev/v4/registry" - "go-micro.dev/v4/util/cmd" - mnet "go-micro.dev/v4/util/net" -) - -type consulRegistry struct { - Address []string - opts registry.Options - - client *consul.Client - config *consul.Config - - // connect enabled - connect bool - - queryOptions *consul.QueryOptions - - sync.Mutex - register map[string]uint64 - // lastChecked tracks when a node was last checked as existing in Consul - lastChecked map[string]time.Time -} - -func init() { - cmd.DefaultRegistries["consul"] = NewRegistry -} - -func getDeregisterTTL(t time.Duration) time.Duration { - // splay slightly for the watcher? - splay := time.Second * 5 - deregTTL := t + splay - - // consul has a minimum timeout on deregistration of 1 minute. 
- if t < time.Minute { - deregTTL = time.Minute + splay - } - - return deregTTL -} - -func newTransport(config *tls.Config) *http.Transport { - if config == nil { - config = &tls.Config{ - InsecureSkipVerify: true, - } - } - - t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: config, - } - runtime.SetFinalizer(&t, func(tr **http.Transport) { - (*tr).CloseIdleConnections() - }) - return t -} - -func configure(c *consulRegistry, opts ...registry.Option) { - // set opts - for _, o := range opts { - o(&c.opts) - } - - // use default non pooled config - config := consul.DefaultNonPooledConfig() - - if c.opts.Context != nil { - // Use the consul config passed in the options, if available - if co, ok := c.opts.Context.Value("consul_config").(*consul.Config); ok { - config = co - } - if cn, ok := c.opts.Context.Value("consul_connect").(bool); ok { - c.connect = cn - } - - // Use the consul query options passed in the options, if available - if qo, ok := c.opts.Context.Value("consul_query_options").(*consul.QueryOptions); ok && qo != nil { - c.queryOptions = qo - } - if as, ok := c.opts.Context.Value("consul_allow_stale").(bool); ok { - c.queryOptions.AllowStale = as - } - } - - // check if there are any addrs - var addrs []string - - // iterate the options addresses - for _, address := range c.opts.Addrs { - // check we have a port - addr, port, err := net.SplitHostPort(address) - if ae, ok := err.(*net.AddrError); ok && ae.Err == "missing port in address" { - port = "8500" - addr = address - addrs = append(addrs, net.JoinHostPort(addr, port)) - } else if err == nil { - addrs = append(addrs, net.JoinHostPort(addr, port)) - } - } - - // set the addrs - if len(addrs) > 0 { - c.Address = addrs - config.Address = c.Address[0] - } - - if config.HttpClient == nil { - config.HttpClient = new(http.Client) - } - - // 
requires secure connection? - if c.opts.Secure || c.opts.TLSConfig != nil { - config.Scheme = "https" - // We're going to support InsecureSkipVerify - config.HttpClient.Transport = newTransport(c.opts.TLSConfig) - } - - // set timeout - if c.opts.Timeout > 0 { - config.HttpClient.Timeout = c.opts.Timeout - } - - // set the config - c.config = config - - // remove client - c.client = nil - - // setup the client - c.Client() -} - -func (c *consulRegistry) Init(opts ...registry.Option) error { - configure(c, opts...) - return nil -} - -func (c *consulRegistry) Deregister(s *registry.Service, opts ...registry.DeregisterOption) error { - if len(s.Nodes) == 0 { - return errors.New("Require at least one node") - } - - // delete our hash and time check of the service - c.Lock() - delete(c.register, s.Name) - delete(c.lastChecked, s.Name) - c.Unlock() - - node := s.Nodes[0] - return c.Client().Agent().ServiceDeregister(node.Id) -} - -func (c *consulRegistry) Register(s *registry.Service, opts ...registry.RegisterOption) error { - if len(s.Nodes) == 0 { - return errors.New("Require at least one node") - } - - var regTCPCheck bool - var regInterval time.Duration - var regHTTPCheck bool - var httpCheckConfig consul.AgentServiceCheck - - var options registry.RegisterOptions - for _, o := range opts { - o(&options) - } - - if c.opts.Context != nil { - if tcpCheckInterval, ok := c.opts.Context.Value("consul_tcp_check").(time.Duration); ok { - regTCPCheck = true - regInterval = tcpCheckInterval - } - var ok bool - if httpCheckConfig, ok = c.opts.Context.Value("consul_http_check_config").(consul.AgentServiceCheck); ok { - regHTTPCheck = true - } - } - - // create hash of service; uint64 - h, err := hash.Hash(s, nil) - if err != nil { - return err - } - - // use first node - node := s.Nodes[0] - - // get existing hash and last checked time - c.Lock() - v, ok := c.register[s.Name] - lastChecked := c.lastChecked[s.Name] - c.Unlock() - - // if it's already registered and matches then 
just pass the check - if ok && v == h { - if options.TTL == time.Duration(0) { - // ensure that our service hasn't been deregistered by Consul - if time.Since(lastChecked) <= getDeregisterTTL(regInterval) { - return nil - } - services, _, err := c.Client().Health().Checks(s.Name, c.queryOptions) - if err == nil { - for _, v := range services { - if v.ServiceID == node.Id { - return nil - } - } - } - } else { - // if the err is nil we're all good, bail out - // if not, we don't know what the state is, so full re-register - if err := c.Client().Agent().PassTTL("service:"+node.Id, ""); err == nil { - return nil - } - } - } - - // encode the tags - tags := encodeMetadata(node.Metadata) - tags = append(tags, encodeEndpoints(s.Endpoints)...) - tags = append(tags, encodeVersion(s.Version)...) - - var check *consul.AgentServiceCheck - - if regTCPCheck { - deregTTL := getDeregisterTTL(regInterval) - - check = &consul.AgentServiceCheck{ - TCP: node.Address, - Interval: fmt.Sprintf("%v", regInterval), - DeregisterCriticalServiceAfter: fmt.Sprintf("%v", deregTTL), - } - - } else if regHTTPCheck { - interval, _ := time.ParseDuration(httpCheckConfig.Interval) - deregTTL := getDeregisterTTL(interval) - - host, _, _ := net.SplitHostPort(node.Address) - healthCheckURI := strings.Replace(httpCheckConfig.HTTP, "{host}", host, 1) - - check = &consul.AgentServiceCheck{ - HTTP: healthCheckURI, - Interval: httpCheckConfig.Interval, - Timeout: httpCheckConfig.Timeout, - DeregisterCriticalServiceAfter: fmt.Sprintf("%v", deregTTL), - } - - // if the TTL is greater than 0 create an associated check - } else if options.TTL > time.Duration(0) { - deregTTL := getDeregisterTTL(options.TTL) - - check = &consul.AgentServiceCheck{ - TTL: fmt.Sprintf("%v", options.TTL), - DeregisterCriticalServiceAfter: fmt.Sprintf("%v", deregTTL), - } - } - - host, pt, _ := net.SplitHostPort(node.Address) - if host == "" { - host = node.Address - } - port, _ := strconv.Atoi(pt) - - // register the service - asr := 
&consul.AgentServiceRegistration{ - ID: node.Id, - Name: s.Name, - Tags: tags, - Port: port, - Address: host, - Meta: node.Metadata, - Check: check, - } - - // Specify consul connect - if c.connect { - asr.Connect = &consul.AgentServiceConnect{ - Native: true, - } - } - - if err := c.Client().Agent().ServiceRegister(asr); err != nil { - return err - } - - // save our hash and time check of the service - c.Lock() - c.register[s.Name] = h - c.lastChecked[s.Name] = time.Now() - c.Unlock() - - // if the TTL is 0 we don't mess with the checks - if options.TTL == time.Duration(0) { - return nil - } - - // pass the healthcheck - return c.Client().Agent().PassTTL("service:"+node.Id, "") -} - -func (c *consulRegistry) GetService(name string, opts ...registry.GetOption) ([]*registry.Service, error) { - var rsp []*consul.ServiceEntry - var err error - - // if we're connect enabled only get connect services - if c.connect { - rsp, _, err = c.Client().Health().Connect(name, "", false, c.queryOptions) - } else { - rsp, _, err = c.Client().Health().Service(name, "", false, c.queryOptions) - } - if err != nil { - return nil, err - } - - serviceMap := map[string]*registry.Service{} - - for _, s := range rsp { - if s.Service.Service != name { - continue - } - - // version is now a tag - version, _ := decodeVersion(s.Service.Tags) - // service ID is now the node id - id := s.Service.ID - // key is always the version - key := version - - // address is service address - address := s.Service.Address - - // use node address - if len(address) == 0 { - address = s.Node.Address - } - - svc, ok := serviceMap[key] - if !ok { - svc = ®istry.Service{ - Endpoints: decodeEndpoints(s.Service.Tags), - Name: s.Service.Service, - Version: version, - } - serviceMap[key] = svc - } - - var del bool - - for _, check := range s.Checks { - // delete the node if the status is critical - if check.Status == "critical" { - del = true - break - } - } - - // if delete then skip the node - if del { - continue - } 
- - svc.Nodes = append(svc.Nodes, ®istry.Node{ - Id: id, - Address: mnet.HostPort(address, s.Service.Port), - Metadata: decodeMetadata(s.Service.Tags), - }) - } - - var services []*registry.Service - for _, service := range serviceMap { - services = append(services, service) - } - return services, nil -} - -func (c *consulRegistry) ListServices(opts ...registry.ListOption) ([]*registry.Service, error) { - rsp, _, err := c.Client().Catalog().Services(c.queryOptions) - if err != nil { - return nil, err - } - - var services []*registry.Service - - for service := range rsp { - services = append(services, ®istry.Service{Name: service}) - } - - return services, nil -} - -func (c *consulRegistry) Watch(opts ...registry.WatchOption) (registry.Watcher, error) { - return newConsulWatcher(c, opts...) -} - -func (c *consulRegistry) String() string { - return "consul" -} - -func (c *consulRegistry) Options() registry.Options { - return c.opts -} - -func (c *consulRegistry) Client() *consul.Client { - if c.client != nil { - return c.client - } - - for _, addr := range c.Address { - // set the address - c.config.Address = addr - - // create a new client - tmpClient, _ := consul.NewClient(c.config) - - // test the client - _, err := tmpClient.Agent().Host() - if err != nil { - continue - } - - // set the client - c.client = tmpClient - return c.client - } - - // set the default - c.client, _ = consul.NewClient(c.config) - - // return the client - return c.client -} - -func NewRegistry(opts ...registry.Option) registry.Registry { - cr := &consulRegistry{ - opts: registry.Options{}, - register: make(map[string]uint64), - lastChecked: make(map[string]time.Time), - queryOptions: &consul.QueryOptions{ - AllowStale: true, - }, - } - configure(cr, opts...) 
- return cr -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/consul/encoding.go b/vendor/github.com/go-micro/plugins/v4/registry/consul/encoding.go deleted file mode 100644 index 91a5b323dd6..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/consul/encoding.go +++ /dev/null @@ -1,171 +0,0 @@ -package consul - -import ( - "bytes" - "compress/zlib" - "encoding/hex" - "encoding/json" - "io" - - "go-micro.dev/v4/registry" -) - -func encode(buf []byte) string { - var b bytes.Buffer - defer b.Reset() - - w := zlib.NewWriter(&b) - if _, err := w.Write(buf); err != nil { - return "" - } - w.Close() - - return hex.EncodeToString(b.Bytes()) -} - -func decode(d string) []byte { - hr, err := hex.DecodeString(d) - if err != nil { - return nil - } - - br := bytes.NewReader(hr) - zr, err := zlib.NewReader(br) - if err != nil { - return nil - } - - rbuf, err := io.ReadAll(zr) - if err != nil { - return nil - } - zr.Close() - - return rbuf -} - -func encodeEndpoints(en []*registry.Endpoint) []string { - var tags []string - for _, e := range en { - if b, err := json.Marshal(e); err == nil { - tags = append(tags, "e-"+encode(b)) - } - } - return tags -} - -func decodeEndpoints(tags []string) []*registry.Endpoint { - var en []*registry.Endpoint - - // use the first format you find - var ver byte - - for _, tag := range tags { - if len(tag) == 0 || tag[0] != 'e' { - continue - } - - // check version - if ver > 0 && tag[1] != ver { - continue - } - - var e *registry.Endpoint - var buf []byte - - // Old encoding was plain - if tag[1] == '=' { - buf = []byte(tag[2:]) - } - - // New encoding is hex - if tag[1] == '-' { - buf = decode(tag[2:]) - } - - if err := json.Unmarshal(buf, &e); err == nil { - en = append(en, e) - } - - // set version - ver = tag[1] - } - return en -} - -func encodeMetadata(md map[string]string) []string { - var tags []string - for k, v := range md { - if b, err := json.Marshal(map[string]string{ - k: v, - }); err == nil { - // new 
encoding - tags = append(tags, "t-"+encode(b)) - } - } - return tags -} - -func decodeMetadata(tags []string) map[string]string { - md := make(map[string]string) - - var ver byte - - for _, tag := range tags { - if len(tag) == 0 || tag[0] != 't' { - continue - } - - // check version - if ver > 0 && tag[1] != ver { - continue - } - - var kv map[string]string - var buf []byte - - // Old encoding was plain - if tag[1] == '=' { - buf = []byte(tag[2:]) - } - - // New encoding is hex - if tag[1] == '-' { - buf = decode(tag[2:]) - } - - // Now unmarshal - if err := json.Unmarshal(buf, &kv); err == nil { - for k, v := range kv { - md[k] = v - } - } - - // set version - ver = tag[1] - } - return md -} - -func encodeVersion(v string) []string { - return []string{"v-" + encode([]byte(v))} -} - -func decodeVersion(tags []string) (string, bool) { - for _, tag := range tags { - if len(tag) < 2 || tag[0] != 'v' { - continue - } - - // Old encoding was plain - if tag[1] == '=' { - return tag[2:], true - } - - // New encoding is hex - if tag[1] == '-' { - return string(decode(tag[2:])), true - } - } - return "", false -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/consul/options.go b/vendor/github.com/go-micro/plugins/v4/registry/consul/options.go deleted file mode 100644 index 854313b946f..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/consul/options.go +++ /dev/null @@ -1,101 +0,0 @@ -package consul - -import ( - "context" - "fmt" - "time" - - consul "github.com/hashicorp/consul/api" - "go-micro.dev/v4/registry" -) - -// Connect specifies services should be registered as Consul Connect services. 
-func Connect() registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, "consul_connect", true) - } -} - -func Config(c *consul.Config) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, "consul_config", c) - } -} - -// AllowStale sets whether any Consul server (non-leader) can service -// a read. This allows for lower latency and higher throughput -// at the cost of potentially stale data. -// Works similar to Consul DNS Config option [1]. -// Defaults to true. -// -// [1] https://www.consul.io/docs/agent/options.html#allow_stale -func AllowStale(v bool) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, "consul_allow_stale", v) - } -} - -// QueryOptions specifies the QueryOptions to be used when calling -// Consul. See `Consul API` for more information [1]. -// -// [1] https://godoc.org/github.com/hashicorp/consul/api#QueryOptions -func QueryOptions(q *consul.QueryOptions) registry.Option { - return func(o *registry.Options) { - if q == nil { - return - } - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, "consul_query_options", q) - } -} - -// TCPCheck will tell the service provider to check the service address -// and port every `t` interval. It will enabled only if `t` is greater than 0. -// See `TCP + Interval` for more information [1]. 
-// -// [1] https://www.consul.io/docs/agent/checks.html -func TCPCheck(t time.Duration) registry.Option { - return func(o *registry.Options) { - if t <= time.Duration(0) { - return - } - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, "consul_tcp_check", t) - } -} - -// HTTPCheck will tell the service provider to invoke the health check endpoint -// with an interval and timeout. It will be enabled only if interval and -// timeout are greater than 0. -// See `HTTP + Interval` for more information [1]. -// -// [1] https://www.consul.io/docs/agent/checks.html -func HTTPCheck(protocol, port, httpEndpoint string, interval, timeout time.Duration) registry.Option { - return func(o *registry.Options) { - if interval <= time.Duration(0) || timeout <= time.Duration(0) { - return - } - if o.Context == nil { - o.Context = context.Background() - } - check := consul.AgentServiceCheck{ - HTTP: fmt.Sprintf("%s://{host}:%s%s", protocol, port, httpEndpoint), - Interval: fmt.Sprintf("%v", interval), - Timeout: fmt.Sprintf("%v", timeout), - } - o.Context = context.WithValue(o.Context, "consul_http_check_config", check) - } -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/consul/watcher.go b/vendor/github.com/go-micro/plugins/v4/registry/consul/watcher.go deleted file mode 100644 index 133e5c92f38..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/consul/watcher.go +++ /dev/null @@ -1,299 +0,0 @@ -package consul - -import ( - "fmt" - "net" - "sync" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/api/watch" - "go-micro.dev/v4/registry" - regutil "go-micro.dev/v4/util/registry" -) - -type consulWatcher struct { - r *consulRegistry - wo registry.WatchOptions - wp *watch.Plan - watchers map[string]*watch.Plan - - next chan *registry.Result - exit chan bool - - sync.RWMutex - services map[string][]*registry.Service -} - -func newConsulWatcher(cr *consulRegistry, opts 
...registry.WatchOption) (registry.Watcher, error) { - var wo registry.WatchOptions - for _, o := range opts { - o(&wo) - } - - cw := &consulWatcher{ - r: cr, - wo: wo, - exit: make(chan bool), - next: make(chan *registry.Result, 10), - watchers: make(map[string]*watch.Plan), - services: make(map[string][]*registry.Service), - } - - wp, err := watch.Parse(map[string]interface{}{ - "service": wo.Service, - "type": "service", - }) - if err != nil { - return nil, err - } - - wp.Handler = cw.serviceHandler - go wp.RunWithClientAndHclog(cr.Client(), wp.Logger) - cw.wp = wp - - return cw, nil -} - -func (cw *consulWatcher) serviceHandler(idx uint64, data interface{}) { - entries, ok := data.([]*api.ServiceEntry) - if !ok { - return - } - - serviceMap := map[string]*registry.Service{} - serviceName := "" - - for _, e := range entries { - serviceName = e.Service.Service - // version is now a tag - version, _ := decodeVersion(e.Service.Tags) - // service ID is now the node id - id := e.Service.ID - // key is always the version - key := version - // address is service address - address := e.Service.Address - - // use node address - if len(address) == 0 { - address = e.Node.Address - } - - svc, ok := serviceMap[key] - if !ok { - svc = ®istry.Service{ - Endpoints: decodeEndpoints(e.Service.Tags), - Name: e.Service.Service, - Version: version, - } - serviceMap[key] = svc - } - - var del bool - - for _, check := range e.Checks { - // delete the node if the status is critical - if check.Status == "critical" { - del = true - break - } - } - - // if delete then skip the node - if del { - continue - } - - svc.Nodes = append(svc.Nodes, ®istry.Node{ - Id: id, - Address: net.JoinHostPort(address, fmt.Sprint(e.Service.Port)), - Metadata: decodeMetadata(e.Service.Tags), - }) - } - - cw.RLock() - // make a copy - rservices := make(map[string][]*registry.Service) - for k, v := range cw.services { - rservices[k] = v - } - cw.RUnlock() - - var newServices []*registry.Service - - // 
serviceMap is the new set of services keyed by name+version - for _, newService := range serviceMap { - // append to the new set of cached services - newServices = append(newServices, newService) - - // check if the service exists in the existing cache - oldServices, ok := rservices[serviceName] - if !ok { - // does not exist? then we're creating brand new entries - cw.next <- ®istry.Result{Action: "create", Service: newService} - continue - } - - // service exists. ok let's figure out what to update and delete version wise - action := "create" - - for _, oldService := range oldServices { - // does this version exist? - // no? then default to create - if oldService.Version != newService.Version { - continue - } - - // yes? then it's an update - action = "update" - - var nodes []*registry.Node - // check the old nodes to see if they've been deleted - for _, oldNode := range oldService.Nodes { - var seen bool - for _, newNode := range newService.Nodes { - if newNode.Id == oldNode.Id { - seen = true - break - } - } - // does the old node exist in the new set of nodes - // no? then delete that shit - if !seen { - nodes = append(nodes, oldNode) - } - } - - // it's an update rather than creation - if len(nodes) > 0 { - delService := regutil.CopyService(oldService) - delService.Nodes = nodes - cw.next <- ®istry.Result{Action: "delete", Service: delService} - } - } - - cw.next <- ®istry.Result{Action: action, Service: newService} - } - - // Now check old versions that may not be in new services map - for _, old := range rservices[serviceName] { - // old version does not exist in new version map - // kill it with fire! 
- if _, ok := serviceMap[old.Version]; !ok { - cw.next <- ®istry.Result{Action: "delete", Service: old} - } - } - - // there are no services in the service, empty all services - if len(rservices) != 0 && serviceName == "" { - for _, services := range rservices { - for _, service := range services { - cw.next <- ®istry.Result{Action: "delete", Service: service} - } - } - } - - cw.Lock() - cw.services[serviceName] = newServices - cw.Unlock() -} - -func (cw *consulWatcher) handle(idx uint64, data interface{}) { - services, ok := data.(map[string][]string) - if !ok { - return - } - - // add new watchers - for service := range services { - // Filter on watch options - // wo.Service: Only watch services we care about - if len(cw.wo.Service) > 0 && service != cw.wo.Service { - continue - } - - if _, ok := cw.watchers[service]; ok { - continue - } - wp, err := watch.Parse(map[string]interface{}{ - "type": "service", - "service": service, - }) - if err == nil { - wp.Handler = cw.serviceHandler - go wp.RunWithClientAndHclog(cw.r.Client(), wp.Logger) - cw.watchers[service] = wp - cw.next <- ®istry.Result{Action: "create", Service: ®istry.Service{Name: service}} - } - } - - cw.RLock() - // make a copy - rservices := make(map[string][]*registry.Service) - for k, v := range cw.services { - rservices[k] = v - } - cw.RUnlock() - - // remove unknown services from registry - // save the things we want to delete - deleted := make(map[string][]*registry.Service) - - for service := range rservices { - if _, ok := services[service]; !ok { - cw.Lock() - // save this before deleting - deleted[service] = cw.services[service] - delete(cw.services, service) - cw.Unlock() - } - } - - // remove unknown services from watchers - for service, w := range cw.watchers { - if _, ok := services[service]; !ok { - w.Stop() - delete(cw.watchers, service) - for _, oldService := range deleted[service] { - // send a delete for the service nodes that we're removing - cw.next <- ®istry.Result{Action: 
"delete", Service: oldService} - } - // sent the empty list as the last resort to indicate to delete the entire service - cw.next <- ®istry.Result{Action: "delete", Service: ®istry.Service{Name: service}} - } - } -} - -func (cw *consulWatcher) Next() (*registry.Result, error) { - select { - case <-cw.exit: - return nil, registry.ErrWatcherStopped - case r, ok := <-cw.next: - if !ok { - return nil, registry.ErrWatcherStopped - } - return r, nil - } -} - -func (cw *consulWatcher) Stop() { - select { - case <-cw.exit: - return - default: - close(cw.exit) - if cw.wp == nil { - return - } - cw.wp.Stop() - - // drain results - for { - select { - case <-cw.next: - default: - return - } - } - } -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/etcd/LICENSE b/vendor/github.com/go-micro/plugins/v4/registry/etcd/LICENSE deleted file mode 100644 index 7d74e027ca9..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/etcd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Asim Aslam. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-micro/plugins/v4/registry/etcd/etcd.go b/vendor/github.com/go-micro/plugins/v4/registry/etcd/etcd.go deleted file mode 100644 index 2fb8b212c03..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/etcd/etcd.go +++ /dev/null @@ -1,423 +0,0 @@ -// Package etcd provides an etcd service registry -package etcd - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "net" - "os" - "path" - "sort" - "strings" - "sync" - "time" - - hash "github.com/mitchellh/hashstructure" - "go-micro.dev/v4/logger" - "go-micro.dev/v4/registry" - "go-micro.dev/v4/util/cmd" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.uber.org/zap" -) - -var ( - prefix = "/micro/registry/" -) - -type etcdRegistry struct { - client *clientv3.Client - options registry.Options - - sync.RWMutex - register map[string]uint64 - leases map[string]clientv3.LeaseID -} - -func init() { - cmd.DefaultRegistries["etcd"] = NewRegistry -} - -func NewRegistry(opts ...registry.Option) registry.Registry { - e := &etcdRegistry{ - options: registry.Options{}, - register: make(map[string]uint64), - leases: make(map[string]clientv3.LeaseID), - } - username, password := os.Getenv("ETCD_USERNAME"), os.Getenv("ETCD_PASSWORD") - if len(username) > 0 && len(password) > 0 { - opts = append(opts, Auth(username, password)) - } - address := os.Getenv("MICRO_REGISTRY_ADDRESS") - if len(address) > 0 { - opts = append(opts, registry.Addrs(address)) - } - configure(e, opts...) 
- return e -} - -func configure(e *etcdRegistry, opts ...registry.Option) error { - config := clientv3.Config{ - Endpoints: []string{"127.0.0.1:2379"}, - } - - for _, o := range opts { - o(&e.options) - } - - if e.options.Timeout == 0 { - e.options.Timeout = 5 * time.Second - } - - if e.options.Logger == nil { - e.options.Logger = logger.DefaultLogger - } - - config.DialTimeout = e.options.Timeout - - if e.options.Secure || e.options.TLSConfig != nil { - tlsConfig := e.options.TLSConfig - if tlsConfig == nil { - tlsConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } - - config.TLS = tlsConfig - } - - if e.options.Context != nil { - u, ok := e.options.Context.Value(authKey{}).(*authCreds) - if ok { - config.Username = u.Username - config.Password = u.Password - } - cfg, ok := e.options.Context.Value(logConfigKey{}).(*zap.Config) - if ok && cfg != nil { - config.LogConfig = cfg - } - } - - var cAddrs []string - - for _, address := range e.options.Addrs { - if len(address) == 0 { - continue - } - addr, port, err := net.SplitHostPort(address) - if ae, ok := err.(*net.AddrError); ok && ae.Err == "missing port in address" { - port = "2379" - addr = address - cAddrs = append(cAddrs, net.JoinHostPort(addr, port)) - } else if err == nil { - cAddrs = append(cAddrs, net.JoinHostPort(addr, port)) - } - } - - // if we got addrs then we'll update - if len(cAddrs) > 0 { - config.Endpoints = cAddrs - } - - cli, err := clientv3.New(config) - if err != nil { - return err - } - e.client = cli - return nil -} - -func encode(s *registry.Service) string { - b, _ := json.Marshal(s) - return string(b) -} - -func decode(ds []byte) *registry.Service { - var s *registry.Service - json.Unmarshal(ds, &s) - return s -} - -func nodePath(s, id string) string { - service := strings.Replace(s, "/", "-", -1) - node := strings.Replace(id, "/", "-", -1) - return path.Join(prefix, service, node) -} - -func servicePath(s string) string { - return path.Join(prefix, strings.Replace(s, "/", "-", -1)) 
-} - -func (e *etcdRegistry) Init(opts ...registry.Option) error { - return configure(e, opts...) -} - -func (e *etcdRegistry) Options() registry.Options { - return e.options -} - -func (e *etcdRegistry) registerNode(s *registry.Service, node *registry.Node, opts ...registry.RegisterOption) error { - if len(s.Nodes) == 0 { - return errors.New("Require at least one node") - } - - // check existing lease cache - e.RLock() - leaseID, ok := e.leases[s.Name+node.Id] - e.RUnlock() - - log := e.options.Logger - - if !ok { - // missing lease, check if the key exists - ctx, cancel := context.WithTimeout(context.Background(), e.options.Timeout) - defer cancel() - - // look for the existing key - rsp, err := e.client.Get(ctx, nodePath(s.Name, node.Id), clientv3.WithSerializable()) - if err != nil { - return err - } - - // get the existing lease - for _, kv := range rsp.Kvs { - if kv.Lease > 0 { - leaseID = clientv3.LeaseID(kv.Lease) - - // decode the existing node - srv := decode(kv.Value) - if srv == nil || len(srv.Nodes) == 0 { - continue - } - - // create hash of service; uint64 - h, err := hash.Hash(srv.Nodes[0], nil) - if err != nil { - continue - } - - // save the info - e.Lock() - e.leases[s.Name+node.Id] = leaseID - e.register[s.Name+node.Id] = h - e.Unlock() - - break - } - } - } - - var leaseNotFound bool - - // renew the lease if it exists - if leaseID > 0 { - log.Logf(logger.TraceLevel, "Renewing existing lease for %s %d", s.Name, leaseID) - if _, err := e.client.KeepAliveOnce(context.TODO(), leaseID); err != nil { - if err != rpctypes.ErrLeaseNotFound { - return err - } - - log.Logf(logger.TraceLevel, "Lease not found for %s %d", s.Name, leaseID) - // lease not found do register - leaseNotFound = true - } - } - - // create hash of service; uint64 - h, err := hash.Hash(node, nil) - if err != nil { - return err - } - - // get existing hash for the service node - e.Lock() - v, ok := e.register[s.Name+node.Id] - e.Unlock() - - // the service is unchanged, skip 
registering - if ok && v == h && !leaseNotFound { - log.Logf(logger.TraceLevel, "Service %s node %s unchanged skipping registration", s.Name, node.Id) - return nil - } - - service := ®istry.Service{ - Name: s.Name, - Version: s.Version, - Metadata: s.Metadata, - Endpoints: s.Endpoints, - Nodes: []*registry.Node{node}, - } - - var options registry.RegisterOptions - for _, o := range opts { - o(&options) - } - - ctx, cancel := context.WithTimeout(context.Background(), e.options.Timeout) - defer cancel() - - var lgr *clientv3.LeaseGrantResponse - if options.TTL.Seconds() > 0 { - // get a lease used to expire keys since we have a ttl - lgr, err = e.client.Grant(ctx, int64(options.TTL.Seconds())) - if err != nil { - return err - } - } - - log.Logf(logger.TraceLevel, "Registering %s id %s with lease %v and leaseID %v and ttl %v", service.Name, node.Id, lgr, lgr.ID, options.TTL) - // create an entry for the node - if lgr != nil { - _, err = e.client.Put(ctx, nodePath(service.Name, node.Id), encode(service), clientv3.WithLease(lgr.ID)) - } else { - _, err = e.client.Put(ctx, nodePath(service.Name, node.Id), encode(service)) - } - if err != nil { - return err - } - - e.Lock() - // save our hash of the service - e.register[s.Name+node.Id] = h - // save our leaseID of the service - if lgr != nil { - e.leases[s.Name+node.Id] = lgr.ID - } - e.Unlock() - - return nil -} - -func (e *etcdRegistry) Deregister(s *registry.Service, opts ...registry.DeregisterOption) error { - if len(s.Nodes) == 0 { - return errors.New("Require at least one node") - } - - for _, node := range s.Nodes { - e.Lock() - // delete our hash of the service - delete(e.register, s.Name+node.Id) - // delete our lease of the service - delete(e.leases, s.Name+node.Id) - e.Unlock() - - ctx, cancel := context.WithTimeout(context.Background(), e.options.Timeout) - defer cancel() - - e.options.Logger.Logf(logger.TraceLevel, "Deregistering %s id %s", s.Name, node.Id) - _, err := e.client.Delete(ctx, nodePath(s.Name, 
node.Id)) - if err != nil { - return err - } - } - - return nil -} - -func (e *etcdRegistry) Register(s *registry.Service, opts ...registry.RegisterOption) error { - if len(s.Nodes) == 0 { - return errors.New("Require at least one node") - } - - var gerr error - - // register each node individually - for _, node := range s.Nodes { - err := e.registerNode(s, node, opts...) - if err != nil { - gerr = err - } - } - - return gerr -} - -func (e *etcdRegistry) GetService(name string, opts ...registry.GetOption) ([]*registry.Service, error) { - ctx, cancel := context.WithTimeout(context.Background(), e.options.Timeout) - defer cancel() - - rsp, err := e.client.Get(ctx, servicePath(name)+"/", clientv3.WithPrefix(), clientv3.WithSerializable()) - if err != nil { - return nil, err - } - - if len(rsp.Kvs) == 0 { - return nil, registry.ErrNotFound - } - - serviceMap := map[string]*registry.Service{} - - for _, n := range rsp.Kvs { - if sn := decode(n.Value); sn != nil { - s, ok := serviceMap[sn.Version] - if !ok { - s = ®istry.Service{ - Name: sn.Name, - Version: sn.Version, - Metadata: sn.Metadata, - Endpoints: sn.Endpoints, - } - serviceMap[s.Version] = s - } - - s.Nodes = append(s.Nodes, sn.Nodes...) 
- } - } - - services := make([]*registry.Service, 0, len(serviceMap)) - for _, service := range serviceMap { - services = append(services, service) - } - - return services, nil -} - -func (e *etcdRegistry) ListServices(opts ...registry.ListOption) ([]*registry.Service, error) { - versions := make(map[string]*registry.Service) - - ctx, cancel := context.WithTimeout(context.Background(), e.options.Timeout) - defer cancel() - - rsp, err := e.client.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithSerializable()) - if err != nil { - return nil, err - } - - if len(rsp.Kvs) == 0 { - return []*registry.Service{}, nil - } - - for _, n := range rsp.Kvs { - sn := decode(n.Value) - if sn == nil { - continue - } - v, ok := versions[sn.Name+sn.Version] - if !ok { - versions[sn.Name+sn.Version] = sn - continue - } - // append to service:version nodes - v.Nodes = append(v.Nodes, sn.Nodes...) - } - - services := make([]*registry.Service, 0, len(versions)) - for _, service := range versions { - services = append(services, service) - } - - // sort the services - sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name }) - - return services, nil -} - -func (e *etcdRegistry) Watch(opts ...registry.WatchOption) (registry.Watcher, error) { - return newEtcdWatcher(e, e.options.Timeout, opts...) -} - -func (e *etcdRegistry) String() string { - return "etcd" -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/etcd/options.go b/vendor/github.com/go-micro/plugins/v4/registry/etcd/options.go deleted file mode 100644 index 52fc55e3a1e..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/etcd/options.go +++ /dev/null @@ -1,37 +0,0 @@ -package etcd - -import ( - "context" - - "go-micro.dev/v4/registry" - "go.uber.org/zap" -) - -type authKey struct{} - -type logConfigKey struct{} - -type authCreds struct { - Username string - Password string -} - -// Auth allows you to specify username/password. 
-func Auth(username, password string) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, authKey{}, &authCreds{Username: username, Password: password}) - } -} - -// LogConfig allows you to set etcd log config. -func LogConfig(config *zap.Config) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, logConfigKey{}, config) - } -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/etcd/watcher.go b/vendor/github.com/go-micro/plugins/v4/registry/etcd/watcher.go deleted file mode 100644 index 964849dcd32..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/etcd/watcher.go +++ /dev/null @@ -1,91 +0,0 @@ -package etcd - -import ( - "context" - "errors" - "time" - - "go-micro.dev/v4/registry" - clientv3 "go.etcd.io/etcd/client/v3" -) - -type etcdWatcher struct { - stop chan bool - w clientv3.WatchChan - client *clientv3.Client - timeout time.Duration -} - -func newEtcdWatcher(r *etcdRegistry, timeout time.Duration, opts ...registry.WatchOption) (registry.Watcher, error) { - var wo registry.WatchOptions - for _, o := range opts { - o(&wo) - } - - ctx, cancel := context.WithCancel(context.Background()) - stop := make(chan bool, 1) - - go func() { - <-stop - cancel() - }() - - watchPath := prefix - if len(wo.Service) > 0 { - watchPath = servicePath(wo.Service) + "/" - } - - return &etcdWatcher{ - stop: stop, - w: r.client.Watch(ctx, watchPath, clientv3.WithPrefix(), clientv3.WithPrevKV()), - client: r.client, - timeout: timeout, - }, nil -} - -func (ew *etcdWatcher) Next() (*registry.Result, error) { - for wresp := range ew.w { - if wresp.Err() != nil { - return nil, wresp.Err() - } - if wresp.Canceled { - return nil, errors.New("could not get next") - } - for _, ev := range wresp.Events { - service := decode(ev.Kv.Value) - var action 
string - - switch ev.Type { - case clientv3.EventTypePut: - if ev.IsCreate() { - action = "create" - } else if ev.IsModify() { - action = "update" - } - case clientv3.EventTypeDelete: - action = "delete" - - // get service from prevKv - service = decode(ev.PrevKv.Value) - } - - if service == nil { - continue - } - return ®istry.Result{ - Action: action, - Service: service, - }, nil - } - } - return nil, errors.New("could not get next") -} - -func (ew *etcdWatcher) Stop() { - select { - case <-ew.stop: - return - default: - close(ew.stop) - } -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/LICENSE b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/LICENSE deleted file mode 100644 index 7d74e027ca9..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Asim Aslam. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/README.md b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/README.md deleted file mode 100644 index 80282db46fb..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Kubernetes Registry Plugin for micro -This is a plugin for go-micro that allows you to use Kubernetes as a registry. - - -## Overview -This registry plugin makes use of Annotations and Labels on a Kubernetes pod -to build a service discovery mechanism. - - -## RBAC -If your Kubernetes cluster has RBAC enabled, a role and role binding -will need to be created to allow this plugin to `list` and `patch` pods. - -A cluster role can be used to specify the `list` and `patch` -requirements, while a role binding per namespace can be used to apply -the cluster role. The example RBAC configs below assume your Micro-based -services are running in the `test` namespace, and the pods that contain -the services are using the `micro-services` service account. - -``` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: micro-registry -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - list - - patch - - watch -``` - -``` -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: micro-registry -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: micro-registry -subjects: -- kind: ServiceAccount - name: micro-services - namespace: test -``` - - -## Gotchas -* Registering/Deregistering relies on the HOSTNAME Environment Variable, which inside a pod -is the place where it can be retrieved from. (This needs improving) - - -## Connecting to the Kubernetes API -### Within a pod -If the `--registry_address` flag is omitted, the plugin will securely connect to -the Kubernetes API using the pods "Service Account". No extra configuration is necessary. - -Find out more about service accounts here. 
http://kubernetes.io/docs/user-guide/accessing-the-cluster/ - -### Outside of Kubernetes -Some functions of the plugin should work, but its not been heavily tested. -Currently no TLS support. diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/request.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/request.go deleted file mode 100644 index 4cb92c5bddd..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/request.go +++ /dev/null @@ -1,220 +0,0 @@ -// Package api ... -package api - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - - "github.com/go-micro/plugins/v4/registry/kubernetes/client/watch" -) - -// Request is used to construct a http request for the k8s API. -type Request struct { - client *http.Client - header http.Header - params url.Values - method string - host string - namespace string - - resource string - resourceName *string - body io.Reader - - err error -} - -// Params is the object to pass in to set parameters -// on a request. -type Params struct { - LabelSelector map[string]string - Watch bool -} - -// Options ... -type Options struct { - Host string - Namespace string - BearerToken *string - Client *http.Client -} - -// NewRequest creates a k8s api request. -func NewRequest(opts *Options) *Request { - req := Request{ - header: make(http.Header), - params: make(url.Values), - client: opts.Client, - namespace: opts.Namespace, - host: opts.Host, - } - - if opts.BearerToken != nil { - req.SetHeader("Authorization", "Bearer "+*opts.BearerToken) - } - - return &req -} - -// verb sets method. -func (r *Request) verb(method string) *Request { - r.method = method - return r -} - -// Get request. -func (r *Request) Get() *Request { - return r.verb("GET") -} - -// Post request. -func (r *Request) Post() *Request { - return r.verb("POST") -} - -// Put request. 
-func (r *Request) Put() *Request { - return r.verb("PUT") -} - -// Patch request -// https://github.com/kubernetes/kubernetes/blob/master/docs/devel/api-conventions.md#patch-operations -func (r *Request) Patch() *Request { - return r.verb("PATCH").SetHeader("Content-Type", "application/strategic-merge-patch+json") -} - -// Delete request. -func (r *Request) Delete() *Request { - return r.verb("DELETE") -} - -// Namespace is to set the namespace to operate on. -func (r *Request) Namespace(s string) *Request { - r.namespace = s - return r -} - -// Resource is the type of resource the operation is -// for, such as "services", "endpoints" or "pods". -func (r *Request) Resource(s string) *Request { - r.resource = s - return r -} - -// Name is for targeting a specific resource by id. -func (r *Request) Name(s string) *Request { - r.resourceName = &s - return r -} - -// Body pass in a body to set, this is for POST, PUT -// and PATCH requests. -func (r *Request) Body(in interface{}) *Request { - b := new(bytes.Buffer) - if err := json.NewEncoder(b).Encode(&in); err != nil { - r.err = err - return r - } - - r.body = b - - return r -} - -// Params isused to set parameters on a request. -func (r *Request) Params(p *Params) *Request { - for k, v := range p.LabelSelector { - // create new key=value pair - value := fmt.Sprintf("%s=%s", k, v) - // check if there's an existing value - if label := r.params.Get("labelSelector"); len(label) > 0 { - value = fmt.Sprintf("%s,%s", label, value) - } - // set and overwrite the value - r.params.Set("labelSelector", value) - } - - return r -} - -// SetHeader sets a header on a request with -// a `key` and `value`. -func (r *Request) SetHeader(key, value string) *Request { - r.header.Add(key, value) - return r -} - -// request builds the http.Request from the options. 
-func (r *Request) request() (*http.Request, error) { - url := fmt.Sprintf("%s/api/v1/namespaces/%s/%s/", r.host, r.namespace, r.resource) - - // append resourceName if it is present - if r.resourceName != nil { - url += *r.resourceName - } - - // append any query params - if len(r.params) > 0 { - url += "?" + r.params.Encode() - } - - // build request - req, err := http.NewRequest(r.method, url, r.body) - if err != nil { - return nil, err - } - - // set headers on request - req.Header = r.header - - return req, nil -} - -// Do builds and triggers the request. -func (r *Request) Do() *Response { - if r.err != nil { - return &Response{ - err: r.err, - } - } - - req, err := r.request() - if err != nil { - return &Response{ - err: err, - } - } - - res, err := r.client.Do(req) - if err != nil { - return &Response{ - err: err, - } - } - - // return res, err - return newResponse(res, err) -} - -// Watch builds and triggers the request, but will watch instead of return -// an object. -func (r *Request) Watch() (watch.Watch, error) { - if r.err != nil { - return nil, r.err - } - - r.params.Set("watch", "true") - - req, err := r.request() - if err != nil { - return nil, err - } - - w, err := watch.NewBodyWatcher(req, r.client) - - return w, err -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/response.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/response.go deleted file mode 100644 index 27305e163c2..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/api/response.go +++ /dev/null @@ -1,90 +0,0 @@ -package api - -import ( - "encoding/json" - "io" - "net/http" - - "github.com/pkg/errors" - log "go-micro.dev/v4/logger" -) - -// Errors ... -var ( - ErrNoPodName = errors.New("no pod name provided") - ErrNotFound = errors.New("pod not found") - ErrDecode = errors.New("error decoding") - ErrOther = errors.New("unspecified error occurred in k8s registry") -) - -// Response ... 
-type Response struct { - res *http.Response - err error -} - -// Error returns an error. -func (r *Response) Error() error { - return r.err -} - -// StatusCode returns status code for response. -func (r *Response) StatusCode() int { - return r.res.StatusCode -} - -// Decode decodes body into `data`. -func (r *Response) Decode(data interface{}) error { - if r.err != nil { - return r.err - } - - var err error - defer func() { - nerr := r.res.Body.Close() - if err != nil { - err = nerr - } - }() - - decoder := json.NewDecoder(r.res.Body) - - if err := decoder.Decode(&data); err != nil { - return errors.Wrap(ErrDecode, err.Error()) - } - - return r.err -} - -func newResponse(r *http.Response, err error) *Response { - resp := &Response{ - res: r, - err: err, - } - - if err != nil { - return resp - } - - // Check if request is successful. - s := resp.res.StatusCode - if s == http.StatusOK || s == http.StatusCreated || s == http.StatusNoContent { - return resp - } - - if resp.res.StatusCode == http.StatusNotFound { - resp.err = ErrNotFound - return resp - } - - log.Errorf("K8s: request failed with code %v", resp.res.StatusCode) - - b, err := io.ReadAll(resp.res.Body) - if err == nil { - log.Errorf("K8s: request failed with body: %s", string(b)) - } - - resp.err = ErrOther - - return resp -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/client.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/client.go deleted file mode 100644 index 989c151082b..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/client.go +++ /dev/null @@ -1,142 +0,0 @@ -// Package client is the kubernetes registry client. 
-package client - -import ( - "crypto/tls" - "errors" - "net/http" - "os" - "path" - - "go-micro.dev/v4/logger" - - "github.com/go-micro/plugins/v4/registry/kubernetes/client/api" - "github.com/go-micro/plugins/v4/registry/kubernetes/client/watch" -) - -var ( - serviceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount" - - // ErrReadNamespace error when failed to read namespace. - ErrReadNamespace = errors.New("could not read namespace from service account secret") -) - -// Client ... -type client struct { - opts *api.Options -} - -// NewClientByHost sets up a client by host. -func NewClientByHost(host string) Kubernetes { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{ - //nolint:gosec - InsecureSkipVerify: true, - }, - DisableCompression: true, - } - - c := &http.Client{ - Transport: tr, - } - - return &client{ - opts: &api.Options{ - Client: c, - Host: host, - Namespace: "default", - }, - } -} - -// NewClientInCluster should work similarly to the official api -// NewInClient by setting up a client configuration for use within -// a k8s pod. 
-func NewClientInCluster() Kubernetes { - host := "https://" + os.Getenv("KUBERNETES_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_SERVICE_PORT") - - s, err := os.Stat(serviceAccountPath) - if err != nil { - logger.Fatal(err) - } - - if s == nil || !s.IsDir() { - logger.Fatal(errors.New("no k8s service account found")) - } - - t, err := os.ReadFile(path.Join(serviceAccountPath, "token")) - if err != nil { - logger.Fatal(err) - } - - token := string(t) - - ns, err := detectNamespace() - if err != nil { - logger.Fatal(err) - } - - crt, err := CertPoolFromFile(path.Join(serviceAccountPath, "ca.crt")) - if err != nil { - logger.Fatal(err) - } - - c := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: crt, - MinVersion: tls.VersionTLS12, - }, - DisableCompression: true, - }, - } - - return &client{ - opts: &api.Options{ - Client: c, - Host: host, - Namespace: ns, - BearerToken: &token, - }, - } -} - -// ListPods ... -func (c *client) ListPods(labels map[string]string) (*PodList, error) { - var pods PodList - err := api.NewRequest(c.opts).Get().Resource("pods").Params(&api.Params{LabelSelector: labels}).Do().Decode(&pods) - - return &pods, err -} - -// UpdatePod ... -func (c *client) UpdatePod(name string, p *Pod) (*Pod, error) { - var pod Pod - err := api.NewRequest(c.opts).Patch().Resource("pods").Name(name).Body(p).Do().Decode(&pod) - - return &pod, err -} - -// WatchPods ... 
-func (c *client) WatchPods(labels map[string]string) (watch.Watch, error) { - return api.NewRequest(c.opts).Get().Resource("pods").Params(&api.Params{LabelSelector: labels}).Watch() -} - -func detectNamespace() (string, error) { - nsPath := path.Join(serviceAccountPath, "namespace") - - // Make sure it's a file and we can read it - if s, err := os.Stat(nsPath); err != nil { - return "", err - } else if s.IsDir() { - return "", ErrReadNamespace - } - - // Read the file, and cast to a string - ns, err := os.ReadFile(path.Clean(nsPath)) - if err != nil { - return string(ns), err - } - - return string(ns), nil -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/kubernetes.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/kubernetes.go deleted file mode 100644 index 2a07ab4ccec..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/kubernetes.go +++ /dev/null @@ -1,35 +0,0 @@ -package client - -import "github.com/go-micro/plugins/v4/registry/kubernetes/client/watch" - -// Kubernetes ... -type Kubernetes interface { - ListPods(labels map[string]string) (*PodList, error) - UpdatePod(podName string, pod *Pod) (*Pod, error) - WatchPods(labels map[string]string) (watch.Watch, error) -} - -// PodList ... -type PodList struct { - Items []Pod `json:"items"` -} - -// Pod is the top level item for a pod. -type Pod struct { - Metadata *Meta `json:"metadata"` - Status *Status `json:"status"` -} - -// Meta ... -type Meta struct { - Name string `json:"name,omitempty"` - Labels map[string]*string `json:"labels,omitempty"` - Annotations map[string]*string `json:"annotations,omitempty"` - DeletionTimestamp string `json:"deletionTimestamp,omitempty"` -} - -// Status ... 
-type Status struct { - PodIP string `json:"podIP"` - Phase string `json:"phase"` -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/utils.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/utils.go deleted file mode 100644 index 17de97b2de3..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -package client - -import ( - "crypto/x509" - "encoding/pem" - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// COPIED FROM -// https://github.com/kubernetes/kubernetes/blob/7a725418af4661067b56506faabc2d44c6d7703a/pkg/util/crypto/crypto.go - -// CertPoolFromFile returns an x509.CertPool containing the certificates in -// the given PEM-encoded file. Returns an error if the file could not be read, -// a certificate could not be parsed, or if the file does not contain any certificates. -func CertPoolFromFile(filename string) (*x509.CertPool, error) { - certs, err := certificatesFromFile(filename) - if err != nil { - return nil, err - } - - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - - return pool, nil -} - -// certificatesFromFile returns the x509.Certificates contained in the given -// PEM-encoded file. Returns an error if the file could not be read, a -// certificate could not be parsed, or if the file does not contain any certificates. -func certificatesFromFile(file string) ([]*x509.Certificate, error) { - if len(file) == 0 { - return nil, errors.New("error reading certificates from an empty filename") - } - - pemBlock, err := os.ReadFile(filepath.Clean(file)) - if err != nil { - return nil, err - } - - certs, err := CertsFromPEM(pemBlock) - if err != nil { - return nil, errors.Wrapf(err, "error reading %s", file) - } - - return certs, nil -} - -// CertsFromPEM returns the x509.Certificates contained in the given PEM-encoded -// byte array. 
Returns an error if a certificate could not be parsed, or if the -// data does not contain any certificates. -func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { - ok := false - certs := []*x509.Certificate{} - - for len(pemCerts) > 0 { - var block *pem.Block - block, pemCerts = pem.Decode(pemCerts) - - if block == nil { - break - } - - // Only use PEM "CERTIFICATE" blocks without extra headers - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return certs, err - } - - certs = append(certs, cert) - ok = true - } - - if !ok { - return certs, errors.New("could not read any certificates") - } - - return certs, nil -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/body.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/body.go deleted file mode 100644 index 4570a3e8de9..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/body.go +++ /dev/null @@ -1,109 +0,0 @@ -package watch - -import ( - "bufio" - "context" - "encoding/json" - "net/http" - "sync/atomic" - "time" - - "github.com/pkg/errors" -) - -// bodyWatcher scans the body of a request for chunks. -type bodyWatcher struct { - ctx context.Context - stop context.CancelFunc - results chan Event - res *http.Response - req *http.Request -} - -// Changes returns the results channel. -func (wr *bodyWatcher) ResultChan() <-chan Event { - return wr.results -} - -// Stop cancels the request. -func (wr *bodyWatcher) Stop() { - select { - case <-wr.ctx.Done(): - return - default: - wr.stop() - } -} - -func (wr *bodyWatcher) stream() { - reader := bufio.NewReader(wr.res.Body) - - // ignore first few messages from stream, - // as they are usually old. 
- var ignore atomic.Bool - - go func() { - <-time.After(time.Second) - ignore.Store(false) - }() - - go func() { - //nolint:errcheck - defer wr.res.Body.Close() - out: - for { - // Read a line - b, err := reader.ReadBytes('\n') - if err != nil { - break - } - - // Ignore for the first second - if ignore.Load() { - continue - } - - // Send the event - var event Event - if err := json.Unmarshal(b, &event); err != nil { - continue - } - - select { - case <-wr.ctx.Done(): - break out - case wr.results <- event: - } - } - - close(wr.results) - // stop the watcher - wr.Stop() - }() -} - -// NewBodyWatcher creates a k8s body watcher for a given http request. -func NewBodyWatcher(req *http.Request, client *http.Client) (Watch, error) { - ctx, cancel := context.WithCancel(context.Background()) - - req = req.WithContext(ctx) - - //nolint:bodyclose - res, err := client.Do(req) - if err != nil { - cancel() - return nil, errors.Wrap(err, "body watcher failed to make http request") - } - - wr := &bodyWatcher{ - ctx: ctx, - results: make(chan Event), - stop: cancel, - req: req, - res: res, - } - - go wr.stream() - - return wr, nil -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/watch.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/watch.go deleted file mode 100644 index e3778cf5d26..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/client/watch/watch.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package watch implements the k8s watcher. -package watch - -import "encoding/json" - -// Watch ... -type Watch interface { - Stop() - ResultChan() <-chan Event -} - -// EventType defines the possible types of events. -type EventType string - -// EventTypes used. -const ( - Added EventType = "ADDED" - Modified EventType = "MODIFIED" - Deleted EventType = "DELETED" - Error EventType = "ERROR" -) - -// Event represents a single event to a watched resource. 
-type Event struct { - Type EventType `json:"type"` - Object json.RawMessage `json:"object"` -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/kubernetes.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/kubernetes.go deleted file mode 100644 index a76113abc6a..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/kubernetes.go +++ /dev/null @@ -1,321 +0,0 @@ -// Package kubernetes provides a kubernetes registry -package kubernetes - -import ( - "encoding/json" - "fmt" - "os" - "regexp" - "strings" - "time" - - "go-micro.dev/v4/registry" - "go-micro.dev/v4/util/cmd" - - "github.com/pkg/errors" - - "github.com/go-micro/plugins/v4/registry/kubernetes/client" -) - -type kregistry struct { - client client.Kubernetes - timeout time.Duration - options registry.Options -} - -var ( - // used on pods as labels & services to select - // eg: svcSelectorPrefix+"svc.name" - svcSelectorPrefix = "micro.mu/selector-" - svcSelectorValue = "service" - - labelTypeKey = "micro.mu/type" - labelTypeValueService = "service" - - // used on k8s services to scope a serialized - // micro service by pod name. - annotationServiceKeyPrefix = "micro.mu/service-" - - // Pod status. - podRunning = "Running" - - // label name regex. - labelRe = regexp.MustCompilePOSIX("[-A-Za-z0-9_.]") -) - -// Err are all package errors. -var ( - ErrNoHostname = errors.New("failed to get podname from HOSTNAME variable") - ErrNoNodesFound = errors.New("you must provide at least one node") -) - -// podSelector. 
-var podSelector = map[string]string{ - labelTypeKey: labelTypeValueService, -} - -func init() { - cmd.DefaultRegistries["kubernetes"] = NewRegistry -} - -func configure(k *kregistry, opts ...registry.Option) error { - for _, o := range opts { - o(&k.options) - } - - // get first host - var host string - if len(k.options.Addrs) > 0 && len(k.options.Addrs[0]) > 0 { - host = k.options.Addrs[0] - } - - if k.options.Timeout == 0 { - k.options.Timeout = time.Second * 1 - } - - // if no hosts setup, assume InCluster - var c client.Kubernetes - if len(host) == 0 { - c = client.NewClientInCluster() - } else { - c = client.NewClientByHost(host) - } - - k.client = c - k.timeout = k.options.Timeout - - return nil -} - -// serviceName generates a valid service name for k8s labels. -func serviceName(name string) string { - aname := make([]byte, len(name)) - - for i, r := range []byte(name) { - if !labelRe.Match([]byte{r}) { - aname[i] = '_' - continue - } - - aname[i] = r - } - - return string(aname) -} - -// Init allows reconfig of options. -func (c *kregistry) Init(opts ...registry.Option) error { - return configure(c, opts...) -} - -// Options returns the registry Options. -func (c *kregistry) Options() registry.Options { - return c.options -} - -// Register sets a service selector label and an annotation with a -// serialized version of the service passed in. -func (c *kregistry) Register(s *registry.Service, opts ...registry.RegisterOption) error { - if len(s.Nodes) == 0 { - return ErrNoNodesFound - } - - svcName := s.Name - - // TODO: grab podname from somewhere better than this. 
- podName, err := getPodName() - if err != nil { - return errors.Wrap(err, "failed to register") - } - - // encode micro service - b, err := json.Marshal(s) - if err != nil { - return err - } - - svc := string(b) - - pod := &client.Pod{ - Metadata: &client.Meta{ - Labels: map[string]*string{ - labelTypeKey: &labelTypeValueService, - svcSelectorPrefix + serviceName(svcName): &svcSelectorValue, - }, - Annotations: map[string]*string{ - annotationServiceKeyPrefix + serviceName(svcName): &svc, - }, - }, - } - - if _, err := c.client.UpdatePod(podName, pod); err != nil { - return err - } - - return nil -} - -// Deregister nils out any things set in Register. -func (c *kregistry) Deregister(s *registry.Service, opts ...registry.DeregisterOption) error { - if len(s.Nodes) == 0 { - return ErrNoNodesFound - } - - svcName := s.Name - - // TODO: grab podname from somewhere better than env var. - podName, err := getPodName() - if err != nil { - return errors.Wrap(err, "failed to deregister") - } - - pod := &client.Pod{ - Metadata: &client.Meta{ - Labels: map[string]*string{ - svcSelectorPrefix + serviceName(svcName): nil, - }, - Annotations: map[string]*string{ - annotationServiceKeyPrefix + serviceName(svcName): nil, - }, - }, - } - - if _, err := c.client.UpdatePod(podName, pod); err != nil { - return err - } - - return nil -} - -// GetService will get all the pods with the given service selector, -// and build services from the annotations. 
-func (c *kregistry) GetService(name string, opts ...registry.GetOption) ([]*registry.Service, error) { - pods, err := c.client.ListPods(map[string]string{ - svcSelectorPrefix + serviceName(name): svcSelectorValue, - }) - if err != nil { - return nil, err - } - - if len(pods.Items) == 0 { - return nil, registry.ErrNotFound - } - - // svcs mapped by version - svcs := make(map[string]*registry.Service) - - // loop through items - for _, pod := range pods.Items { - if pod.Status.Phase != podRunning || pod.Metadata.DeletionTimestamp != "" { - continue - } - // get serialized service from annotation - svcStr, ok := pod.Metadata.Annotations[annotationServiceKeyPrefix+serviceName(name)] - if !ok { - continue - } - - var svc registry.Service - - // unmarshal service string - err := json.Unmarshal([]byte(*svcStr), &svc) - if err != nil { - return nil, fmt.Errorf("could not unmarshal service '%s' from pod annotation", name) - } - - // merge up pod service & ip with versioned service. - vs, ok := svcs[svc.Version] - if !ok { - svcs[svc.Version] = &svc - continue - } - - vs.Nodes = append(vs.Nodes, svc.Nodes...) - } - - list := make([]*registry.Service, 0, len(svcs)) - for _, val := range svcs { - list = append(list, val) - } - - return list, nil -} - -// ListServices will list all the service names. -func (c *kregistry) ListServices(opts ...registry.ListOption) ([]*registry.Service, error) { - pods, err := c.client.ListPods(podSelector) - if err != nil { - return nil, err - } - - // svcs mapped by name+version - svcs := make(map[string]*registry.Service) - - for _, pod := range pods.Items { - if pod.Status.Phase != podRunning || pod.Metadata.DeletionTimestamp != "" { - continue - } - - for k, v := range pod.Metadata.Annotations { - if !strings.HasPrefix(k, annotationServiceKeyPrefix) { - continue - } - - // we have to unmarshal the annotation itself since the - // key is encoded to match the regex restriction. 
- var svc registry.Service - if err := json.Unmarshal([]byte(*v), &svc); err != nil { - continue - } - - s, ok := svcs[svc.Name+svc.Version] - if !ok { - svcs[svc.Name+svc.Version] = &svc - continue - } - - // append to service:version nodes - s.Nodes = append(s.Nodes, svc.Nodes...) - } - } - - i := 0 - list := make([]*registry.Service, len(svcs)) - - for _, s := range svcs { - list[i] = s - i++ - } - - return list, nil -} - -// Watch returns a kubernetes watcher. -func (c *kregistry) Watch(opts ...registry.WatchOption) (registry.Watcher, error) { - return newWatcher(c, opts...) -} - -func (c *kregistry) String() string { - return "kubernetes" -} - -// NewRegistry creates a kubernetes registry. -func NewRegistry(opts ...registry.Option) registry.Registry { - k := &kregistry{ - options: registry.Options{}, - } - - //nolint:errcheck,gosec - configure(k, opts...) - - return k -} - -func getPodName() (string, error) { - podName := os.Getenv("HOSTNAME") - if len(podName) == 0 { - return "", ErrNoHostname - } - - return podName, nil -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/watcher.go b/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/watcher.go deleted file mode 100644 index b90180845ac..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/kubernetes/watcher.go +++ /dev/null @@ -1,267 +0,0 @@ -package kubernetes - -import ( - "encoding/json" - "errors" - "strings" - "sync" - - "go-micro.dev/v4/logger" - "go-micro.dev/v4/registry" - - "github.com/go-micro/plugins/v4/registry/kubernetes/client" - "github.com/go-micro/plugins/v4/registry/kubernetes/client/watch" -) - -var ( - deleteAction = "delete" -) - -type k8sWatcher struct { - registry *kregistry - watcher watch.Watch - next chan *registry.Result - - sync.RWMutex - pods map[string]*client.Pod - sync.Once -} - -// build a cache of pods when the watcher starts. 
-func (k *k8sWatcher) updateCache() ([]*registry.Result, error) { - podList, err := k.registry.client.ListPods(podSelector) - if err != nil { - return nil, err - } - - var results []*registry.Result - - for _, p := range podList.Items { - // Copy to new var as p gets overwritten by the loop - pod := p - rslts := k.buildPodResults(&pod, nil) - results = append(results, rslts...) - - k.Lock() - k.pods[pod.Metadata.Name] = &pod - k.Unlock() - } - - return results, nil -} - -// look through pod annotations, compare against cache if present -// and return a list of results to send down the wire. -func (k *k8sWatcher) buildPodResults(pod *client.Pod, cache *client.Pod) []*registry.Result { - var results []*registry.Result - - ignore := make(map[string]bool) - - if pod.Metadata != nil { - results, ignore = podBuildResult(pod, cache) - } - - // loop through cache annotations to find services - // not accounted for above, and "delete" them. - if cache != nil && cache.Metadata != nil { - for annKey, annVal := range cache.Metadata.Annotations { - if ignore[annKey] { - continue - } - - // check this annotation kv is a service notation - if !strings.HasPrefix(annKey, annotationServiceKeyPrefix) { - continue - } - - rslt := ®istry.Result{Action: deleteAction} - - // unmarshal service notation from annotation value - if err := json.Unmarshal([]byte(*annVal), &rslt.Service); err != nil { - continue - } - - results = append(results, rslt) - } - } - - return results -} - -// handleEvent will taken an event from the k8s pods API and do the correct -// things with the result, based on the local cache. 
-func (k *k8sWatcher) handleEvent(event watch.Event) { - var pod client.Pod - if err := json.Unmarshal([]byte(event.Object), &pod); err != nil { - logger.Error("K8s Watcher: Couldnt unmarshal event object from pod") - return - } - - //nolint:exhaustive - switch event.Type { - // Pod was modified - case watch.Modified: - k.RLock() - cache := k.pods[pod.Metadata.Name] - k.RUnlock() - - // service could have been added, edited or removed. - var results []*registry.Result - - if pod.Status.Phase == podRunning { - results = k.buildPodResults(&pod, cache) - } else { - // passing in cache might not return all results - results = k.buildPodResults(&pod, nil) - } - - for _, result := range results { - // pod isnt running - if pod.Status.Phase != podRunning || pod.Metadata.DeletionTimestamp != "" { - result.Action = deleteAction - } - k.next <- result - } - - k.Lock() - k.pods[pod.Metadata.Name] = &pod - k.Unlock() - - return - - // Pod was deleted - // passing in cache might not return all results - case watch.Deleted: - results := k.buildPodResults(&pod, nil) - - for _, result := range results { - result.Action = deleteAction - k.next <- result - } - - k.Lock() - delete(k.pods, pod.Metadata.Name) - k.Unlock() - - return - } -} - -// Next will block until a new result comes in. -func (k *k8sWatcher) Next() (*registry.Result, error) { - r, ok := <-k.next - if !ok { - return nil, errors.New("result chan closed") - } - - return r, nil -} - -// Stop will cancel any requests, and close channels. 
-func (k *k8sWatcher) Stop() { - k.watcher.Stop() - - select { - case <-k.next: - return - default: - k.Do(func() { - close(k.next) - }) - } -} - -func newWatcher(kr *kregistry, opts ...registry.WatchOption) (registry.Watcher, error) { - var wo registry.WatchOptions - for _, o := range opts { - o(&wo) - } - - selector := podSelector - if len(wo.Service) > 0 { - selector = map[string]string{ - svcSelectorPrefix + serviceName(wo.Service): svcSelectorValue, - } - } - - // Create watch request - watcher, err := kr.client.WatchPods(selector) - if err != nil { - return nil, err - } - - k := &k8sWatcher{ - registry: kr, - watcher: watcher, - next: make(chan *registry.Result), - pods: make(map[string]*client.Pod), - } - - // update cache, but dont emit changes - if _, err := k.updateCache(); err != nil { - return nil, err - } - - // range over watch request changes, and invoke - // the update event - go func() { - for event := range watcher.ResultChan() { - k.handleEvent(event) - } - - k.Stop() - }() - - return k, nil -} - -func podBuildResult(pod *client.Pod, cache *client.Pod) ([]*registry.Result, map[string]bool) { - results := make([]*registry.Result, 0, len(pod.Metadata.Annotations)) - ignore := make(map[string]bool) - - for annKey, annVal := range pod.Metadata.Annotations { - // check this annotation kv is a service notation - if !strings.HasPrefix(annKey, annotationServiceKeyPrefix) { - continue - } - - if annVal == nil { - continue - } - - // ignore when we check the cached annotations - // as we take care of it here - ignore[annKey] = true - - // compare against cache. - var ( - cacheExists bool - cav *string - ) - - if cache != nil && cache.Metadata != nil { - cav, cacheExists = cache.Metadata.Annotations[annKey] - if cacheExists && cav != nil && cav == annVal { - // service notation exists and is identical - - // no change result required. 
- continue - } - } - - rslt := ®istry.Result{} - if cacheExists { - rslt.Action = "update" - } else { - rslt.Action = "create" - } - - // unmarshal service notation from annotation value - if err := json.Unmarshal([]byte(*annVal), &rslt.Service); err != nil { - continue - } - - results = append(results, rslt) - } - - return results, ignore -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/mdns/LICENSE b/vendor/github.com/go-micro/plugins/v4/registry/mdns/LICENSE deleted file mode 100644 index 7d74e027ca9..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/mdns/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Asim Aslam. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-micro/plugins/v4/registry/mdns/mdns.go b/vendor/github.com/go-micro/plugins/v4/registry/mdns/mdns.go deleted file mode 100644 index 3e4c56ea6ee..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/mdns/mdns.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package mdns provides a multicast dns registry -package mdns - -import ( - "go-micro.dev/v4/registry" - "go-micro.dev/v4/util/cmd" -) - -func init() { - cmd.DefaultRegistries["mdns"] = NewRegistry -} - -// NewRegistry returns a new mdns registry. -func NewRegistry(opts ...registry.Option) registry.Registry { - return registry.NewRegistry(opts...) -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/nats/LICENSE b/vendor/github.com/go-micro/plugins/v4/registry/nats/LICENSE deleted file mode 100644 index 7d74e027ca9..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/nats/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Asim Aslam. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-micro/plugins/v4/registry/nats/nats.go b/vendor/github.com/go-micro/plugins/v4/registry/nats/nats.go deleted file mode 100644 index 6e0d6ebfae3..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/nats/nats.go +++ /dev/null @@ -1,422 +0,0 @@ -// Package nats provides a NATS registry using broadcast queries -package nats - -import ( - "context" - "encoding/json" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go" - "go-micro.dev/v4/registry" - "go-micro.dev/v4/util/cmd" -) - -type natsRegistry struct { - addrs []string - opts registry.Options - nopts nats.Options - queryTopic string - watchTopic string - registerAction string - - sync.RWMutex - conn *nats.Conn - services map[string][]*registry.Service - listeners map[string]chan bool -} - -var ( - defaultQueryTopic = "micro.registry.nats.query" - defaultWatchTopic = "micro.registry.nats.watch" - defaultRegisterAction = "create" -) - -func init() { - cmd.DefaultRegistries["nats"] = NewRegistry -} - -func configure(n *natsRegistry, opts ...registry.Option) error { - for _, o := range opts { - o(&n.opts) - } - - natsOptions := nats.GetDefaultOptions() - if n, ok := n.opts.Context.Value(optionsKey{}).(nats.Options); ok { - natsOptions = n - } - - queryTopic := defaultQueryTopic - if qt, ok := n.opts.Context.Value(queryTopicKey{}).(string); ok { - queryTopic = qt - } - - watchTopic := defaultWatchTopic - if wt, ok := n.opts.Context.Value(watchTopicKey{}).(string); ok { - watchTopic = wt - } - - registerAction := defaultRegisterAction - if ra, ok := n.opts.Context.Value(registerActionKey{}).(string); ok { - registerAction = ra - } - - // registry.Options have higher priority than nats.Options - // only if Addrs, Secure or TLSConfig were not set through a registry.Option - // we read them from nats.Option - if len(n.opts.Addrs) == 0 { - n.opts.Addrs = natsOptions.Servers - } - - if !n.opts.Secure { - n.opts.Secure = natsOptions.Secure - } - - if n.opts.TLSConfig == 
nil { - n.opts.TLSConfig = natsOptions.TLSConfig - } - - // check & add nats:// prefix (this makes also sure that the addresses - // stored in natsRegistry.addrs and n.opts.Addrs are identical) - n.opts.Addrs = setAddrs(n.opts.Addrs) - - n.addrs = n.opts.Addrs - n.nopts = natsOptions - n.queryTopic = queryTopic - n.watchTopic = watchTopic - n.registerAction = registerAction - - return nil -} - -func setAddrs(addrs []string) []string { - var cAddrs []string - for _, addr := range addrs { - if len(addr) == 0 { - continue - } - if !strings.HasPrefix(addr, "nats://") { - addr = "nats://" + addr - } - cAddrs = append(cAddrs, addr) - } - if len(cAddrs) == 0 { - cAddrs = []string{nats.DefaultURL} - } - return cAddrs -} - -func (n *natsRegistry) newConn() (*nats.Conn, error) { - opts := n.nopts - opts.Servers = n.addrs - opts.Secure = n.opts.Secure - opts.TLSConfig = n.opts.TLSConfig - - // secure might not be set - if opts.TLSConfig != nil { - opts.Secure = true - } - - return opts.Connect() -} - -func (n *natsRegistry) getConn() (*nats.Conn, error) { - n.Lock() - defer n.Unlock() - - if n.conn != nil { - return n.conn, nil - } - - c, err := n.newConn() - if err != nil { - return nil, err - } - n.conn = c - - return n.conn, nil -} - -func (n *natsRegistry) register(s *registry.Service) error { - conn, err := n.getConn() - if err != nil { - return err - } - - n.Lock() - defer n.Unlock() - - // cache service - n.services[s.Name] = addServices(n.services[s.Name], cp([]*registry.Service{s})) - - // create query listener - if n.listeners[s.Name] == nil { - listener := make(chan bool) - - // create a subscriber that responds to queries - sub, err := conn.Subscribe(n.queryTopic, func(m *nats.Msg) { - var result *registry.Result - - if err := json.Unmarshal(m.Data, &result); err != nil { - return - } - - var services []*registry.Service - - switch result.Action { - // is this a get query and we own the service? 
- case "get": - if result.Service.Name != s.Name { - return - } - n.RLock() - services = cp(n.services[s.Name]) - n.RUnlock() - // it's a list request, but we're still only a - // subscriber for this service... so just get this service - // totally suboptimal - case "list": - n.RLock() - services = cp(n.services[s.Name]) - n.RUnlock() - default: - // does not match - return - } - - // respond to query - for _, service := range services { - b, err := json.Marshal(service) - if err != nil { - continue - } - conn.Publish(m.Reply, b) - } - }) - if err != nil { - return err - } - - // Unsubscribe if we're told to do so - go func() { - <-listener - sub.Unsubscribe() - }() - - n.listeners[s.Name] = listener - } - - return nil -} - -func (n *natsRegistry) deregister(s *registry.Service) error { - n.Lock() - defer n.Unlock() - - services := delServices(n.services[s.Name], cp([]*registry.Service{s})) - if len(services) > 0 { - n.services[s.Name] = services - return nil - } - - // delete cached service - delete(n.services, s.Name) - - // delete query listener - if listener, lexists := n.listeners[s.Name]; lexists { - close(listener) - delete(n.listeners, s.Name) - } - - return nil -} - -func (n *natsRegistry) query(s string, quorum int) ([]*registry.Service, error) { - conn, err := n.getConn() - if err != nil { - return nil, err - } - - var action string - var service *registry.Service - - if len(s) > 0 { - action = "get" - service = ®istry.Service{Name: s} - } else { - action = "list" - } - - inbox := nats.NewInbox() - - response := make(chan *registry.Service, 10) - - sub, err := conn.Subscribe(inbox, func(m *nats.Msg) { - var service *registry.Service - if err := json.Unmarshal(m.Data, &service); err != nil { - return - } - select { - case response <- service: - case <-time.After(n.opts.Timeout): - } - }) - if err != nil { - return nil, err - } - defer sub.Unsubscribe() - - b, err := json.Marshal(®istry.Result{Action: action, Service: service}) - if err != nil { - return 
nil, err - } - - if err := conn.PublishMsg(&nats.Msg{ - Subject: n.queryTopic, - Reply: inbox, - Data: b, - }); err != nil { - return nil, err - } - - timeoutChan := time.After(n.opts.Timeout) - - serviceMap := make(map[string]*registry.Service) - -loop: - for { - select { - case service := <-response: - key := service.Name + "-" + service.Version - srv, ok := serviceMap[key] - if ok { - srv.Nodes = append(srv.Nodes, service.Nodes...) - serviceMap[key] = srv - } else { - serviceMap[key] = service - } - - if quorum > 0 && len(serviceMap[key].Nodes) >= quorum { - break loop - } - case <-timeoutChan: - break loop - } - } - - var services []*registry.Service - for _, service := range serviceMap { - services = append(services, service) - } - return services, nil -} - -func (n *natsRegistry) Init(opts ...registry.Option) error { - return configure(n, opts...) -} - -func (n *natsRegistry) Options() registry.Options { - return n.opts -} - -func (n *natsRegistry) Register(s *registry.Service, opts ...registry.RegisterOption) error { - if err := n.register(s); err != nil { - return err - } - - conn, err := n.getConn() - if err != nil { - return err - } - - b, err := json.Marshal(®istry.Result{Action: n.registerAction, Service: s}) - if err != nil { - return err - } - - return conn.Publish(n.watchTopic, b) -} - -func (n *natsRegistry) Deregister(s *registry.Service, opts ...registry.DeregisterOption) error { - if err := n.deregister(s); err != nil { - return err - } - - conn, err := n.getConn() - if err != nil { - return err - } - - b, err := json.Marshal(®istry.Result{Action: "delete", Service: s}) - if err != nil { - return err - } - return conn.Publish(n.watchTopic, b) -} - -func (n *natsRegistry) GetService(s string, opts ...registry.GetOption) ([]*registry.Service, error) { - services, err := n.query(s, getQuorum(n.opts)) - if err != nil { - return nil, err - } - return services, nil -} - -func (n *natsRegistry) ListServices(opts ...registry.ListOption) 
([]*registry.Service, error) { - s, err := n.query("", 0) - if err != nil { - return nil, err - } - - var services []*registry.Service - serviceMap := make(map[string]*registry.Service) - - for _, v := range s { - serviceMap[v.Name] = ®istry.Service{Name: v.Name, Version: v.Version} - } - - for _, v := range serviceMap { - services = append(services, v) - } - - return services, nil -} - -func (n *natsRegistry) Watch(opts ...registry.WatchOption) (registry.Watcher, error) { - conn, err := n.getConn() - if err != nil { - return nil, err - } - - sub, err := conn.SubscribeSync(n.watchTopic) - if err != nil { - return nil, err - } - - var wo registry.WatchOptions - for _, o := range opts { - o(&wo) - } - - return &natsWatcher{sub, wo}, nil -} - -func (n *natsRegistry) String() string { - return "nats" -} - -func NewRegistry(opts ...registry.Option) registry.Registry { - options := registry.Options{ - Timeout: time.Millisecond * 100, - Context: context.Background(), - } - - n := &natsRegistry{ - opts: options, - services: make(map[string][]*registry.Service), - listeners: make(map[string]chan bool), - } - configure(n, opts...) 
- return n -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/nats/options.go b/vendor/github.com/go-micro/plugins/v4/registry/nats/options.go deleted file mode 100644 index 22db3b94e04..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/nats/options.go +++ /dev/null @@ -1,87 +0,0 @@ -package nats - -import ( - "context" - - "github.com/nats-io/nats.go" - "go-micro.dev/v4/registry" -) - -type contextQuorumKey struct{} -type optionsKey struct{} -type watchTopicKey struct{} -type queryTopicKey struct{} -type registerActionKey struct{} - -var ( - DefaultQuorum = 0 -) - -func getQuorum(o registry.Options) int { - if o.Context == nil { - return DefaultQuorum - } - - value := o.Context.Value(contextQuorumKey{}) - if v, ok := value.(int); ok { - return v - } else { - return DefaultQuorum - } -} - -func Quorum(n int) registry.Option { - return func(o *registry.Options) { - o.Context = context.WithValue(o.Context, contextQuorumKey{}, n) - } -} - -// Options allow to inject a nats.Options struct for configuring -// the nats connection. -func Options(nopts nats.Options) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, optionsKey{}, nopts) - } -} - -// QueryTopic allows to set a custom nats topic on which service registries -// query (survey) other services. All registries listen on this topic and -// then respond to the query message. -func QueryTopic(s string) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, queryTopicKey{}, s) - } -} - -// WatchTopic allows to set a custom nats topic on which registries broadcast -// changes (e.g. when services are added, updated or removed). Since we don't -// have a central registry service, each service typically broadcasts in a -// determined frequency on this topic. 
-func WatchTopic(s string) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, watchTopicKey{}, s) - } -} - -// RegisterAction allows to set the action to use when registering to nats. -// As of now there are three different options: -// - "create" (default) only registers if there is noone already registered under the same key. -// - "update" only updates the registration if it already exists. -// - "put" creates or updates a registration -func RegisterAction(s string) registry.Option { - return func(o *registry.Options) { - if o.Context == nil { - o.Context = context.Background() - } - o.Context = context.WithValue(o.Context, registerActionKey{}, s) - } -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/nats/util.go b/vendor/github.com/go-micro/plugins/v4/registry/nats/util.go deleted file mode 100644 index 002cba5219e..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/nats/util.go +++ /dev/null @@ -1,109 +0,0 @@ -package nats - -import ( - "go-micro.dev/v4/registry" -) - -func cp(current []*registry.Service) []*registry.Service { - var services []*registry.Service - - for _, service := range current { - // copy service - s := new(registry.Service) - *s = *service - - // copy nodes - var nodes []*registry.Node - for _, node := range service.Nodes { - n := new(registry.Node) - *n = *node - nodes = append(nodes, n) - } - s.Nodes = nodes - - // copy endpoints - var eps []*registry.Endpoint - for _, ep := range service.Endpoints { - e := new(registry.Endpoint) - *e = *ep - eps = append(eps, e) - } - s.Endpoints = eps - - // append service - services = append(services, s) - } - - return services -} - -func addNodes(old, neu []*registry.Node) []*registry.Node { - for _, n := range neu { - var seen bool - for i, o := range old { - if o.Id == n.Id { - seen = true - old[i] = n - break - } - } - if !seen { - old = append(old, n) - } - } - 
return old -} - -func addServices(old, neu []*registry.Service) []*registry.Service { - for _, s := range neu { - var seen bool - for i, o := range old { - if o.Version == s.Version { - s.Nodes = addNodes(o.Nodes, s.Nodes) - seen = true - old[i] = s - break - } - } - if !seen { - old = append(old, s) - } - } - return old -} - -func delNodes(old, del []*registry.Node) []*registry.Node { - var nodes []*registry.Node - for _, o := range old { - var rem bool - for _, n := range del { - if o.Id == n.Id { - rem = true - break - } - } - if !rem { - nodes = append(nodes, o) - } - } - return nodes -} - -func delServices(old, del []*registry.Service) []*registry.Service { - var services []*registry.Service - for i, o := range old { - var rem bool - for _, s := range del { - if o.Version == s.Version { - old[i].Nodes = delNodes(o.Nodes, s.Nodes) - if len(old[i].Nodes) == 0 { - rem = true - } - } - } - if !rem { - services = append(services, o) - } - } - return services -} diff --git a/vendor/github.com/go-micro/plugins/v4/registry/nats/watcher.go b/vendor/github.com/go-micro/plugins/v4/registry/nats/watcher.go deleted file mode 100644 index 48f7e29754f..00000000000 --- a/vendor/github.com/go-micro/plugins/v4/registry/nats/watcher.go +++ /dev/null @@ -1,39 +0,0 @@ -package nats - -import ( - "encoding/json" - "time" - - "github.com/nats-io/nats.go" - "go-micro.dev/v4/registry" -) - -type natsWatcher struct { - sub *nats.Subscription - wo registry.WatchOptions -} - -func (n *natsWatcher) Next() (*registry.Result, error) { - var result *registry.Result - for { - m, err := n.sub.NextMsg(time.Minute) - if err != nil && err == nats.ErrTimeout { - continue - } else if err != nil { - return nil, err - } - if err := json.Unmarshal(m.Data, &result); err != nil { - return nil, err - } - if len(n.wo.Service) > 0 && result.Service.Name != n.wo.Service { - continue - } - break - } - - return result, nil -} - -func (n *natsWatcher) Stop() { - n.sub.Unsubscribe() -} diff --git 
a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE deleted file mode 100644 index c72625e4cc8..00000000000 --- a/vendor/github.com/hashicorp/consul/api/LICENSE +++ /dev/null @@ -1,356 +0,0 @@ -Copyright (c) 2013 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. 
any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. 
Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. 
Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index 96a867f279d..00000000000 --- a/vendor/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Consul API Client - -This package provides the `api` package which provides programmatic access to the full Consul API. - -The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api). - -## Usage - -Below is an example of using the Consul client. To run the example, you must first -[install Consul](https://developer.hashicorp.com/consul/downloads) and -[Go](https://go.dev/doc/install). - -To run the client API, create a new Go module. - -```shell -go mod init consul-demo -``` - -Copy the example code into a file called `main.go` in the directory where the module is defined. -As seen in the example, the Consul API is often imported with the alias `capi`. 
- -```go -package main - -import ( - "fmt" - - capi "github.com/hashicorp/consul/api" -) - -func main() { - // Get a new client - client, err := capi.NewClient(capi.DefaultConfig()) - if err != nil { - panic(err) - } - - // Get a handle to the KV API - kv := client.KV() - - // PUT a new KV pair - p := &capi.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")} - _, err = kv.Put(p, nil) - if err != nil { - panic(err) - } - - // Lookup the pair - pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil) - if err != nil { - panic(err) - } - fmt.Printf("KV: %v %s\n", pair.Key, pair.Value) -} -``` - -Install the Consul API dependency with `go mod tidy`. - -In a separate terminal window, start a local Consul server. - -```shell -consul agent -dev -node machine -``` - -Run the example. - -```shell -go run . -``` - -You should get the following result printed to the terminal. - -```shell -KV: REDIS_MAXCLIENTS 1000 -``` - -After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index 48d2e66ee97..00000000000 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,1625 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" - "fmt" - "io" - "net/url" - "time" - - "github.com/mitchellh/mapstructure" -) - -const ( - // ACLClientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -type ACLLink struct { - ID string - Name string -} - -type ACLTokenPolicyLink = ACLLink -type ACLTokenRoleLink = ACLLink - -// ACLToken represents an ACL Token -type ACLToken struct { - CreateIndex uint64 - ModifyIndex uint64 - AccessorID string - SecretID string - Description string - Policies []*ACLTokenPolicyLink `json:",omitempty"` - Roles []*ACLTokenRoleLink `json:",omitempty"` - ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` - NodeIdentities []*ACLNodeIdentity `json:",omitempty"` - Local bool - AuthMethod string `json:",omitempty"` - ExpirationTTL time.Duration `json:",omitempty"` - ExpirationTime *time.Time `json:",omitempty"` - CreateTime time.Time `json:",omitempty"` - Hash []byte `json:",omitempty"` - - // DEPRECATED (ACL-Legacy-Compat) - // Rules are an artifact of legacy tokens deprecated in Consul 1.4 - Rules string `json:"-"` - - // Namespace is the namespace the ACLToken is associated with. - // Namespaces are a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLToken is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // AuthMethodNamespace is the namespace the token's AuthMethod is associated with. - // Namespacing is a Consul Enterprise feature. 
- AuthMethodNamespace string `json:",omitempty"` -} - -type ACLTokenExpanded struct { - ExpandedPolicies []ACLPolicy - ExpandedRoles []ACLRole - - NamespaceDefaultPolicyIDs []string - NamespaceDefaultRoleIDs []string - - AgentACLDefaultPolicy string - AgentACLDownPolicy string - ResolvedByAgent string - - ACLToken -} - -type ACLTokenListEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - AccessorID string - SecretID string - Description string - Policies []*ACLTokenPolicyLink `json:",omitempty"` - Roles []*ACLTokenRoleLink `json:",omitempty"` - ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` - NodeIdentities []*ACLNodeIdentity `json:",omitempty"` - Local bool - AuthMethod string `json:",omitempty"` - ExpirationTime *time.Time `json:",omitempty"` - CreateTime time.Time - Hash []byte - Legacy bool `json:"-"` // DEPRECATED - - // Namespace is the namespace the ACLTokenListEntry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLTokenListEntry is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // AuthMethodNamespace is the namespace the token's AuthMethod is associated with. - // Namespacing is a Consul Enterprise feature. - AuthMethodNamespace string `json:",omitempty"` -} - -// ACLEntry is used to represent a legacy ACL token -// The legacy tokens are deprecated. -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACLReplicationStatus is used to represent the status of ACL replication. 
-type ACLReplicationStatus struct { - Enabled bool - Running bool - SourceDatacenter string - ReplicationType string - ReplicatedIndex uint64 - ReplicatedRoleIndex uint64 - ReplicatedTokenIndex uint64 - LastSuccess time.Time - LastError time.Time - LastErrorMessage string -} - -// ACLServiceIdentity represents a high-level grant of all necessary privileges -// to assume the identity of the named Service in the Catalog and within -// Connect. -type ACLServiceIdentity struct { - ServiceName string - Datacenters []string `json:",omitempty"` -} - -// ACLNodeIdentity represents a high-level grant of all necessary privileges -// to assume the identity of the named Node in the Catalog and within Connect. -type ACLNodeIdentity struct { - NodeName string - Datacenter string -} - -// ACLPolicy represents an ACL Policy. -type ACLPolicy struct { - ID string - Name string - Description string - Rules string - Datacenters []string - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 - - // Namespace is the namespace the ACLPolicy is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLPolicy is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -type ACLPolicyListEntry struct { - ID string - Name string - Description string - Datacenters []string - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 - - // Namespace is the namespace the ACLPolicyListEntry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLPolicyListEntry is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -type ACLRolePolicyLink = ACLLink - -// ACLRole represents an ACL Role. 
-type ACLRole struct { - ID string - Name string - Description string - Policies []*ACLRolePolicyLink `json:",omitempty"` - ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` - NodeIdentities []*ACLNodeIdentity `json:",omitempty"` - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 - - // Namespace is the namespace the ACLRole is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLRole is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -// BindingRuleBindType is the type of binding rule mechanism used. -type BindingRuleBindType string - -const ( - // BindingRuleBindTypeService binds to a service identity with the given name. - BindingRuleBindTypeService BindingRuleBindType = "service" - - // BindingRuleBindTypeRole binds to pre-existing roles with the given name. - BindingRuleBindTypeRole BindingRuleBindType = "role" -) - -type ACLBindingRule struct { - ID string - Description string - AuthMethod string - Selector string - BindType BindingRuleBindType - BindName string - - CreateIndex uint64 - ModifyIndex uint64 - - // Namespace is the namespace the ACLBindingRule is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLBindingRule is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -type ACLAuthMethod struct { - Name string - Type string - DisplayName string `json:",omitempty"` - Description string `json:",omitempty"` - MaxTokenTTL time.Duration `json:",omitempty"` - - // TokenLocality defines the kind of token that this auth method produces. - // This can be either 'local' or 'global'. If empty 'local' is assumed. - TokenLocality string `json:",omitempty"` - - // Configuration is arbitrary configuration for the auth method. 
This - // should only contain primitive values and containers (such as lists and - // maps). - Config map[string]interface{} - - CreateIndex uint64 - ModifyIndex uint64 - - // NamespaceRules apply only on auth methods defined in the default namespace. - // Namespacing is a Consul Enterprise feature. - NamespaceRules []*ACLAuthMethodNamespaceRule `json:",omitempty"` - - // Namespace is the namespace the ACLAuthMethod is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLAuthMethod is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -type ACLTokenFilterOptions struct { - AuthMethod string `json:",omitempty"` - Policy string `json:",omitempty"` - Role string `json:",omitempty"` - ServiceName string `json:",omitempty"` -} - -func (m *ACLAuthMethod) MarshalJSON() ([]byte, error) { - type Alias ACLAuthMethod - exported := &struct { - MaxTokenTTL string `json:",omitempty"` - *Alias - }{ - MaxTokenTTL: m.MaxTokenTTL.String(), - Alias: (*Alias)(m), - } - if m.MaxTokenTTL == 0 { - exported.MaxTokenTTL = "" - } - - return json.Marshal(exported) -} - -func (m *ACLAuthMethod) UnmarshalJSON(data []byte) error { - type Alias ACLAuthMethod - aux := &struct { - MaxTokenTTL string - *Alias - }{ - Alias: (*Alias)(m), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.MaxTokenTTL != "" { - if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil { - return err - } - } - - return nil -} - -type ACLAuthMethodNamespaceRule struct { - // Selector is an expression that matches against verified identity - // attributes returned from the auth method during login. - Selector string `json:",omitempty"` - - // BindNamespace is the target namespace of the binding. Can be lightly - // templated using HIL ${foo} syntax from available field names. 
- // - // If empty it's created in the same namespace as the auth method. - BindNamespace string `json:",omitempty"` -} - -type ACLAuthMethodListEntry struct { - Name string - Type string - DisplayName string `json:",omitempty"` - Description string `json:",omitempty"` - MaxTokenTTL time.Duration `json:",omitempty"` - - // TokenLocality defines the kind of token that this auth method produces. - // This can be either 'local' or 'global'. If empty 'local' is assumed. - TokenLocality string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 - - // Namespace is the namespace the ACLAuthMethodListEntry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the ACLAuthMethodListEntry is associated with. - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -// This is nearly identical to the ACLAuthMethod MarshalJSON -func (m *ACLAuthMethodListEntry) MarshalJSON() ([]byte, error) { - type Alias ACLAuthMethodListEntry - exported := &struct { - MaxTokenTTL string `json:",omitempty"` - *Alias - }{ - MaxTokenTTL: m.MaxTokenTTL.String(), - Alias: (*Alias)(m), - } - if m.MaxTokenTTL == 0 { - exported.MaxTokenTTL = "" - } - - return json.Marshal(exported) -} - -// This is nearly identical to the ACLAuthMethod UnmarshalJSON -func (m *ACLAuthMethodListEntry) UnmarshalJSON(data []byte) error { - type Alias ACLAuthMethodListEntry - aux := &struct { - MaxTokenTTL string - *Alias - }{ - Alias: (*Alias)(m), - } - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.MaxTokenTTL != "" { - if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil { - return err - } - } - - return nil -} - -// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed -// KubernetesAuthMethodConfig. 
-func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { - var config KubernetesAuthMethodConfig - decodeConf := &mapstructure.DecoderConfig{ - Result: &config, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decodeConf) - if err != nil { - return nil, err - } - - if err := decoder.Decode(raw); err != nil { - return nil, fmt.Errorf("error decoding config: %s", err) - } - - return &config, nil -} - -// KubernetesAuthMethodConfig is the config for the built-in Consul auth method -// for Kubernetes. -type KubernetesAuthMethodConfig struct { - Host string `json:",omitempty"` - CACert string `json:",omitempty"` - ServiceAccountJWT string `json:",omitempty"` -} - -// RenderToConfig converts this into a map[string]interface{} suitable for use -// in the ACLAuthMethod.Config field. -func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { - return map[string]interface{}{ - "Host": c.Host, - "CACert": c.CACert, - "ServiceAccountJWT": c.ServiceAccountJWT, - } -} - -// OIDCAuthMethodConfig is the config for the built-in Consul auth method for -// OIDC and JWT. 
-type OIDCAuthMethodConfig struct { - // common for type=oidc and type=jwt - JWTSupportedAlgs []string `json:",omitempty"` - BoundAudiences []string `json:",omitempty"` - ClaimMappings map[string]string `json:",omitempty"` - ListClaimMappings map[string]string `json:",omitempty"` - OIDCDiscoveryURL string `json:",omitempty"` - OIDCDiscoveryCACert string `json:",omitempty"` - // just for type=oidc - OIDCClientID string `json:",omitempty"` - OIDCClientSecret string `json:",omitempty"` - OIDCScopes []string `json:",omitempty"` - OIDCACRValues []string `json:",omitempty"` - AllowedRedirectURIs []string `json:",omitempty"` - VerboseOIDCLogging bool `json:",omitempty"` - // just for type=jwt - JWKSURL string `json:",omitempty"` - JWKSCACert string `json:",omitempty"` - JWTValidationPubKeys []string `json:",omitempty"` - BoundIssuer string `json:",omitempty"` - ExpirationLeeway time.Duration `json:",omitempty"` - NotBeforeLeeway time.Duration `json:",omitempty"` - ClockSkewLeeway time.Duration `json:",omitempty"` -} - -// RenderToConfig converts this into a map[string]interface{} suitable for use -// in the ACLAuthMethod.Config field. 
-func (c *OIDCAuthMethodConfig) RenderToConfig() map[string]interface{} { - return map[string]interface{}{ - // common for type=oidc and type=jwt - "JWTSupportedAlgs": c.JWTSupportedAlgs, - "BoundAudiences": c.BoundAudiences, - "ClaimMappings": c.ClaimMappings, - "ListClaimMappings": c.ListClaimMappings, - "OIDCDiscoveryURL": c.OIDCDiscoveryURL, - "OIDCDiscoveryCACert": c.OIDCDiscoveryCACert, - // just for type=oidc - "OIDCClientID": c.OIDCClientID, - "OIDCClientSecret": c.OIDCClientSecret, - "OIDCScopes": c.OIDCScopes, - "OIDCACRValues": c.OIDCACRValues, - "AllowedRedirectURIs": c.AllowedRedirectURIs, - "VerboseOIDCLogging": c.VerboseOIDCLogging, - // just for type=jwt - "JWKSURL": c.JWKSURL, - "JWKSCACert": c.JWKSCACert, - "JWTValidationPubKeys": c.JWTValidationPubKeys, - "BoundIssuer": c.BoundIssuer, - "ExpirationLeeway": c.ExpirationLeeway, - "NotBeforeLeeway": c.NotBeforeLeeway, - "ClockSkewLeeway": c.ClockSkewLeeway, - } -} - -type ACLLoginParams struct { - AuthMethod string - BearerToken string - Meta map[string]string `json:",omitempty"` -} - -type ACLOIDCAuthURLParams struct { - AuthMethod string - RedirectURI string - ClientNonce string - Meta map[string]string `json:",omitempty"` -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// BootstrapRequest is used for when operators provide an ACL Bootstrap Token -type BootstrapRequest struct { - BootstrapSecret string -} - -// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster -// to get the first management token. 
-func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) { - return a.BootstrapWithToken("") -} - -// BootstrapWithToken is used to get the initial bootstrap token or pass in the one that was provided in the API -func (a *ACL) BootstrapWithToken(btoken string) (*ACLToken, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/bootstrap") - if btoken != "" { - r.obj = &BootstrapRequest{ - BootstrapSecret: btoken, - } - } - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, wm, nil -} - -// Create is used to generate a new token with the given parameters -// -// Deprecated: Use TokenCreate instead. -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -// -// Deprecated: Use TokenUpdate instead. 
-func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -// -// Deprecated: Use TokenDelete instead. -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - closeResponseBody(resp) - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -// -// Deprecated: Use TokenClone instead. -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -// -// Deprecated: Use TokenRead instead. 
-func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -// -// Deprecated: Use TokenList instead. -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Replication returns the status of the ACL replication process in the datacenter -func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/replication") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries *ACLReplicationStatus - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// TokenCreate creates a new ACL token. 
If either the AccessorID or SecretID fields -// of the ACLToken structure are empty they will be filled in by Consul. -func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/token") - r.setWriteOptions(q) - r.obj = token - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid -// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may -// be omitted and will be filled in by Consul with its existing value. -func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if token.AccessorID == "" { - return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating") - } - r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID) - r.setWriteOptions(q) - r.obj = token - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// TokenClone will create a new token with the same policies and locality as the original -// token but will have its own auto-generated AccessorID and SecretID as well having the -// description passed to this function. The accessorID parameter must be a valid Accessor ID -// of an existing token. 
-func (a *ACL) TokenClone(accessorID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if accessorID == "" { - return nil, nil, fmt.Errorf("Must specify a token AccessorID for Token Cloning") - } - - r := a.c.newRequest("PUT", "/v1/acl/token/"+accessorID+"/clone") - r.setWriteOptions(q) - r.obj = struct{ Description string }{description} - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// TokenDelete removes a single ACL token. The accessorID parameter must be a valid -// Accessor ID of an existing token. -func (a *ACL) TokenDelete(accessorID string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("DELETE", "/v1/acl/token/"+accessorID) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - closeResponseBody(resp) - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// TokenRead retrieves the full token details. The accessorID parameter must be a valid -// Accessor ID of an existing token. 
-func (a *ACL) TokenRead(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/token/"+accessorID) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// TokenReadExpanded retrieves the full token details, as well as the contents of any policies affecting the token. -// The accessorID parameter must be a valid Accessor ID of an existing token. -func (a *ACL) TokenReadExpanded(accessorID string, q *QueryOptions) (*ACLTokenExpanded, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/token/"+accessorID) - r.setQueryOptions(q) - r.params.Set("expanded", "true") - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out ACLTokenExpanded - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// TokenReadSelf retrieves the full token details of the token currently -// assigned to the API Client. In this manner its possible to read a token -// by its Secret ID. 
-func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/token/self") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// TokenList lists all tokens. The listing does not contain any SecretIDs as those -// may only be retrieved by a call to TokenRead. -func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/tokens") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLTokenListEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// TokenListFiltered lists all tokens that match the given filter options. -// The listing does not contain any SecretIDs as those may only be retrieved by a call to TokenRead. 
-func (a *ACL) TokenListFiltered(t ACLTokenFilterOptions, q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/tokens") - r.setQueryOptions(q) - - if t.AuthMethod != "" { - r.params.Set("authmethod", t.AuthMethod) - } - if t.Policy != "" { - r.params.Set("policy", t.Policy) - } - if t.Role != "" { - r.params.Set("role", t.Role) - } - if t.ServiceName != "" { - r.params.Set("servicename", t.ServiceName) - } - - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLTokenListEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// PolicyCreate will create a new policy. It is not allowed for the policy parameters -// ID field to be set as this will be generated by Consul while processing the request. -func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { - if policy.ID != "" { - return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation") - } - r := a.c.newRequest("PUT", "/v1/acl/policy") - r.setWriteOptions(q) - r.obj = policy - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLPolicy - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// PolicyUpdate updates a policy. 
The ID field of the policy parameter must be set to an -// existing policy ID -func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { - if policy.ID == "" { - return nil, nil, fmt.Errorf("Must specify an ID in Policy Update") - } - - r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID) - r.setWriteOptions(q) - r.obj = policy - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLPolicy - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// PolicyDelete deletes a policy given its ID. -func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// PolicyRead retrieves the policy details including the rule set. -func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out ACLPolicy - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// PolicyReadByName retrieves the policy details including the rule set with name. 
-func (a *ACL) PolicyReadByName(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/policy/name/"+url.QueryEscape(policyName)) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - var out ACLPolicy - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// PolicyList retrieves a listing of all policies. The listing does not include the -// rules for any policy as those should be retrieved by subsequent calls to PolicyRead. -func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/policies") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLPolicyListEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// RulesTranslate translates the legacy rule syntax into the current syntax. -// -// Deprecated: Support for the legacy syntax translation has been removed. -// This function always returns an error. -func (a *ACL) RulesTranslate(rules io.Reader) (string, error) { - return "", fmt.Errorf("Legacy ACL rules were deprecated in Consul 1.4") -} - -// RulesTranslateToken translates the rules associated with the legacy syntax -// into the current syntax and returns the results. -// -// Deprecated: Support for the legacy syntax translation has been removed. -// This function always returns an error. 
-func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { - return "", fmt.Errorf("Legacy ACL tokens and rules were deprecated in Consul 1.4") -} - -// RoleCreate will create a new role. It is not allowed for the role parameters -// ID field to be set as this will be generated by Consul while processing the request. -func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { - if role.ID != "" { - return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation") - } - - r := a.c.newRequest("PUT", "/v1/acl/role") - r.setWriteOptions(q) - r.obj = role - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLRole - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// RoleUpdate updates a role. The ID field of the role parameter must be set to an -// existing role ID -func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { - if role.ID == "" { - return nil, nil, fmt.Errorf("Must specify an ID in Role Update") - } - - r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID) - r.setWriteOptions(q) - r.obj = role - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLRole - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// RoleDelete deletes a role given its ID. 
-func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - closeResponseBody(resp) - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// RoleRead retrieves the role details (by ID). Returns nil if not found. -func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/role/"+roleID) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - var out ACLRole - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// RoleReadByName retrieves the role details (by name). Returns nil if not found. -func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName)) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - var out ACLRole - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// RoleList retrieves a listing of all roles. The listing does not include some -// metadata for the role as those should be retrieved by subsequent calls to -// RoleRead. 
-func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/roles") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLRole - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// AuthMethodCreate will create a new auth method. -func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { - if method.Name == "" { - return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation") - } - - r := a.c.newRequest("PUT", "/v1/acl/auth-method") - r.setWriteOptions(q) - r.obj = method - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLAuthMethod - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// AuthMethodUpdate updates an auth method. 
-func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { - if method.Name == "" { - return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update") - } - - r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name)) - r.setWriteOptions(q) - r.obj = method - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLAuthMethod - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// AuthMethodDelete deletes an auth method given its Name. -func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) { - if methodName == "" { - return nil, fmt.Errorf("Must specify a Name in Auth Method Delete") - } - - r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - closeResponseBody(resp) - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// AuthMethodRead retrieves the auth method. Returns nil if not found. 
-func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) { - if methodName == "" { - return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read") - } - - r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - var out ACLAuthMethod - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// AuthMethodList retrieves a listing of all auth methods. The listing does not -// include some metadata for the auth method as those should be retrieved by -// subsequent calls to AuthMethodRead. -func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/auth-methods") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLAuthMethodListEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// BindingRuleCreate will create a new binding rule. It is not allowed for the -// binding rule parameter's ID field to be set as this will be generated by -// Consul while processing the request. 
-func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { - if rule.ID != "" { - return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation") - } - - r := a.c.newRequest("PUT", "/v1/acl/binding-rule") - r.setWriteOptions(q) - r.obj = rule - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLBindingRule - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// BindingRuleUpdate updates a binding rule. The ID field of the role binding -// rule parameter must be set to an existing binding rule ID. -func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { - if rule.ID == "" { - return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update") - } - - r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID) - r.setWriteOptions(q) - r.obj = rule - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLBindingRule - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// BindingRuleDelete deletes a binding rule given its ID. 
-func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID) - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// BindingRuleRead retrieves the binding rule details. Returns nil if not found. -func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - var out ACLBindingRule - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -// BindingRuleList retrieves a listing of all binding rules. -func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/binding-rules") - if methodName != "" { - r.params.Set("authmethod", methodName) - } - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLBindingRule - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Login is used to exchange auth method credentials for a newly-minted Consul Token. 
-func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - r := a.c.newRequest("POST", "/v1/acl/login") - r.setWriteOptions(q) - r.obj = auth - - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, wm, nil -} - -// Logout is used to destroy a Consul Token created via Login(). -func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("POST", "/v1/acl/logout") - r.setWriteOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - closeResponseBody(resp) - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// OIDCAuthURL requests an authorization URL to start an OIDC login flow. -func (a *ACL) OIDCAuthURL(auth *ACLOIDCAuthURLParams, q *WriteOptions) (string, *WriteMeta, error) { - if auth.AuthMethod == "" { - return "", nil, fmt.Errorf("Must specify an auth method name") - } - - r := a.c.newRequest("POST", "/v1/acl/oidc/auth-url") - r.setWriteOptions(q) - r.obj = auth - - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out aclOIDCAuthURLResponse - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.AuthURL, wm, nil -} - -type aclOIDCAuthURLResponse struct { - AuthURL string -} - -type ACLOIDCCallbackParams struct { - AuthMethod string - State string - Code string - ClientNonce string -} - -// OIDCCallback is the callback endpoint to complete an OIDC login. 
-func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if auth.AuthMethod == "" { - return nil, nil, fmt.Errorf("Must specify an auth method name") - } - - r := a.c.newRequest("POST", "/v1/acl/oidc/callback") - r.setWriteOptions(q) - r.obj = auth - - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - wm := &WriteMeta{RequestTime: rtt} - var out ACLToken - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index 6775edf4257..00000000000 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,1428 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "net/http" -) - -// ServiceKind is the kind of service being registered. -type ServiceKind string - -const ( - // ServiceKindTypical is a typical, classic Consul service. This is - // represented by the absence of a value. This was chosen for ease of - // backwards compatibility: existing services in the catalog would - // default to the typical service. - ServiceKindTypical ServiceKind = "" - - // ServiceKindConnectProxy is a proxy for the Connect feature. This - // service proxies another service within Consul and speaks the connect - // protocol. - ServiceKindConnectProxy ServiceKind = "connect-proxy" - - // ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This - // service will proxy connections based off the SNI header set by other - // connect proxies - ServiceKindMeshGateway ServiceKind = "mesh-gateway" - - // ServiceKindTerminatingGateway is a Terminating Gateway for the Connect - // feature. 
This service will proxy connections to services outside the mesh. - ServiceKindTerminatingGateway ServiceKind = "terminating-gateway" - - // ServiceKindIngressGateway is an Ingress Gateway for the Connect feature. - // This service will ingress connections based of configuration defined in - // the ingress-gateway config entry. - ServiceKindIngressGateway ServiceKind = "ingress-gateway" - - // ServiceKindAPIGateway is an API Gateway for the Connect feature. - // This service will ingress connections based of configuration defined in - // the api-gateway config entry. - ServiceKindAPIGateway ServiceKind = "api-gateway" -) - -// UpstreamDestType is the type of upstream discovery mechanism. -type UpstreamDestType string - -const ( - // UpstreamDestTypeService discovers instances via healthy service lookup. - UpstreamDestTypeService UpstreamDestType = "service" - - // UpstreamDestTypePreparedQuery discovers instances via prepared query - // execution. - UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query" -) - -// AgentCheck represents a check known to the agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - Type string - ExposedPort int - Definition HealthCheckDefinition - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` -} - -// AgentWeights represent optional weights for a service -type AgentWeights struct { - Passing int - Warning int -} - -// AgentService represents a service known to the agent -type AgentService struct { - Kind ServiceKind `json:",omitempty"` - ID string - Service string - Tags []string - Meta map[string]string - Port int - Address string - SocketPath string `json:",omitempty"` - TaggedAddresses map[string]ServiceAddress `json:",omitempty"` - Weights AgentWeights - EnableTagOverride bool - CreateIndex uint64 `json:",omitempty" bexpr:"-"` - ModifyIndex uint64 `json:",omitempty" bexpr:"-"` - 
ContentHash string `json:",omitempty" bexpr:"-"` - Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` - Connect *AgentServiceConnect `json:",omitempty"` - PeerName string `json:",omitempty"` - // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need - // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. - // For now though, ignoring it works well enough. - Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` - Partition string `json:",omitempty" bexpr:"-" hash:"ignore"` - // Datacenter is only ever returned and is ignored if presented. - Datacenter string `json:",omitempty" bexpr:"-" hash:"ignore"` - Locality *Locality `json:",omitempty" bexpr:"-" hash:"ignore"` -} - -// AgentServiceChecksInfo returns information about a Service and its checks -type AgentServiceChecksInfo struct { - AggregatedStatus string - Service *AgentService - Checks HealthChecks -} - -// AgentServiceConnect represents the Connect configuration of a service. -type AgentServiceConnect struct { - Native bool `json:",omitempty"` - SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"` -} - -// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy -// ServiceDefinition or response. 
-type AgentServiceConnectProxyConfig struct { - EnvoyExtensions []EnvoyExtension `json:",omitempty"` - DestinationServiceName string `json:",omitempty"` - DestinationServiceID string `json:",omitempty"` - LocalServiceAddress string `json:",omitempty"` - LocalServicePort int `json:",omitempty"` - LocalServiceSocketPath string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig `json:",omitempty"` - Config map[string]interface{} `json:",omitempty" bexpr:"-"` - Upstreams []Upstream `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty"` - Expose ExposeConfig `json:",omitempty"` - AccessLogs *AccessLogsConfig `json:",omitempty"` -} - -const ( - // MemberTagKeyACLMode is the key used to indicate what ACL mode the agent is - // operating in. The values of this key will be one of the MemberACLMode constants - // with the key not being present indicating ACLModeUnknown. - MemberTagKeyACLMode = "acls" - - // MemberTagRole is the key used to indicate that the member is a server or not. - MemberTagKeyRole = "role" - - // MemberTagValueRoleServer is the value of the MemberTagKeyRole used to indicate - // that the member represents a Consul server. - MemberTagValueRoleServer = "consul" - - // MemberTagValueRoleClient is the value of the MemberTagKeyRole used to indicate - // that the member represents a Consul client. - MemberTagValueRoleClient = "node" - - // MemberTagKeyDatacenter is the key used to indicate which datacenter this member is in. - MemberTagKeyDatacenter = "dc" - - // MemberTagKeySegment is the key name of the tag used to indicate which network - // segment this member is in. - // Network Segments are a Consul Enterprise feature. - MemberTagKeySegment = "segment" - - // MemberTagKeyPartition is the key name of the tag used to indicate which partition - // this member is in. - // Partitions are a Consul Enterprise feature. 
- MemberTagKeyPartition = "ap" - - // MemberTagKeyBootstrap is the key name of the tag used to indicate whether this - // agent was started with the "bootstrap" configuration enabled - MemberTagKeyBootstrap = "bootstrap" - // MemberTagValueBootstrap is the value of the MemberTagKeyBootstrap key when the - // agent was started with the "bootstrap" configuration enabled. - MemberTagValueBootstrap = "1" - - // MemberTagKeyBootstrapExpect is the key name of the tag used to indicate whether - // this agent was started with the "bootstrap_expect" configuration set to a non-zero - // value. The value of this key will be the string for of that configuration value. - MemberTagKeyBootstrapExpect = "expect" - - // MemberTagKeyUseTLS is the key name of the tag used to indicate whther this agent - // was configured to use TLS. - MemberTagKeyUseTLS = "use_tls" - // MemberTagValueUseTLS is the value of the MemberTagKeyUseTLS when the agent was - // configured to use TLS. Any other value indicates that it was not setup in - // that manner. - MemberTagValueUseTLS = "1" - - // MemberTagKeyReadReplica is the key used to indicate that the member is a read - // replica server (will remain a Raft non-voter). - // Read Replicas are a Consul Enterprise feature. - MemberTagKeyReadReplica = "read_replica" - // MemberTagValueReadReplica is the value of the MemberTagKeyReadReplica key when - // the member is in fact a read-replica. Any other value indicates that it is not. - // Read Replicas are a Consul Enterprise feature. - MemberTagValueReadReplica = "1" -) - -type MemberACLMode string - -const ( - // ACLModeDisables indicates that ACLs are disabled for this agent - ACLModeDisabled MemberACLMode = "0" - // ACLModeEnabled indicates that ACLs are enabled and operating in new ACL - // mode (v1.4.0+ ACLs) - ACLModeEnabled MemberACLMode = "1" - // ACLModeLegacy has been deprecated, and will be treated as ACLModeUnknown. 
- ACLModeLegacy MemberACLMode = "2" // DEPRECATED - // ACLModeUnkown is used to indicate that the AgentMember.Tags didn't advertise - // an ACL mode at all. This is the case for Consul versions before v1.4.0 and - // should be treated the same as ACLModeLegacy. - ACLModeUnknown MemberACLMode = "3" -) - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - // Status of the Member which corresponds to github.com/hashicorp/serf/serf.MemberStatus - // Value is one of: - // - // AgentMemberNone = 0 - // AgentMemberAlive = 1 - // AgentMemberLeaving = 2 - // AgentMemberLeft = 3 - // AgentMemberFailed = 4 - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// ACLMode returns the ACL mode this agent is operating in. -func (m *AgentMember) ACLMode() MemberACLMode { - mode := m.Tags[MemberTagKeyACLMode] - - // the key may not have existed but then an - // empty string will be returned and we will - // handle that in the default case of the switch - switch MemberACLMode(mode) { - case ACLModeDisabled: - return ACLModeDisabled - case ACLModeEnabled: - return ACLModeEnabled - default: - return ACLModeUnknown - } -} - -// IsConsulServer returns true when this member is a Consul server. -func (m *AgentMember) IsConsulServer() bool { - return m.Tags[MemberTagKeyRole] == MemberTagValueRoleServer -} - -// AllSegments is used to select for all segments in MembersOpts. -const AllSegments = "_all" - -// MembersOpts is used for querying member information. -type MembersOpts struct { - // WAN is whether to show members from the WAN. - WAN bool - - // Segment is the LAN segment to show members for. Setting this to the - // AllSegments value above will show members in all segments. 
- Segment string - - Filter string -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - Kind ServiceKind `json:",omitempty"` - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - SocketPath string `json:",omitempty"` - TaggedAddresses map[string]ServiceAddress `json:",omitempty"` - EnableTagOverride bool `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - Weights *AgentWeights `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks - Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` - Connect *AgentServiceConnect `json:",omitempty"` - Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` - Partition string `json:",omitempty" bexpr:"-" hash:"ignore"` - Locality *Locality `json:",omitempty" bexpr:"-" hash:"ignore"` -} - -// ServiceRegisterOpts is used to pass extra options to the service register. -type ServiceRegisterOpts struct { - // Missing healthchecks will be deleted from the agent. - // Using this parameter allows to idempotently register a service and its checks without - // having to manually deregister checks. - ReplaceExistingChecks bool - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use WithContext() to set the context. - ctx context.Context -} - -// WithContext sets the context to be used for the request on a new ServiceRegisterOpts, -// and returns the opts. 
-func (o ServiceRegisterOpts) WithContext(ctx context.Context) ServiceRegisterOpts { - o.ctx = ctx - return o -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` -} - -// AgentServiceCheck is used to define a node or service level check -type AgentServiceCheck struct { - CheckID string `json:",omitempty"` - Name string `json:",omitempty"` - Args []string `json:"ScriptArgs,omitempty"` - DockerContainerID string `json:",omitempty"` - Shell string `json:",omitempty"` // Only supported for Docker. - Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - Header map[string][]string `json:",omitempty"` - Method string `json:",omitempty"` - Body string `json:",omitempty"` - TCP string `json:",omitempty"` - TCPUseTLS bool `json:",omitempty"` - UDP string `json:",omitempty"` - Status string `json:",omitempty"` - Notes string `json:",omitempty"` - TLSServerName string `json:",omitempty"` - TLSSkipVerify bool `json:",omitempty"` - GRPC string `json:",omitempty"` - GRPCUseTLS bool `json:",omitempty"` - H2PING string `json:",omitempty"` - H2PingUseTLS bool `json:",omitempty"` - AliasNode string `json:",omitempty"` - AliasService string `json:",omitempty"` - SuccessBeforePassing int `json:",omitempty"` - FailuresBeforeWarning int `json:",omitempty"` - FailuresBeforeCritical int `json:",omitempty"` - - // In Consul 0.7 and later, checks that are associated with a service - // may also contain this optional DeregisterCriticalServiceAfter field, - // which is a timeout in the same Go time format as Interval and TTL. 
If - // a check is in the critical state for more than this configured value, - // then its associated service (and all of its associated checks) will - // automatically be deregistered. - DeregisterCriticalServiceAfter string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// AgentToken is used when updating ACL tokens for an agent. -type AgentToken struct { - Token string -} - -// Metrics info is used to store different types of metric values from the agent. -type MetricsInfo struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -// GaugeValue stores one value that is updated as time goes on, such as -// the amount of memory allocated. -type GaugeValue struct { - Name string - Value float32 - Labels map[string]string -} - -// PointValue holds a series of points for a metric. -type PointValue struct { - Name string - Points []float32 -} - -// SampledValue stores info about a metric that is incremented over time, -// such as the number of requests to an HTTP endpoint. -type SampledValue struct { - Name string - Count int - Sum float64 - Min float64 - Max float64 - Mean float64 - Stddev float64 - Labels map[string]string -} - -// AgentAuthorizeParams are the request parameters for authorizing a request. -type AgentAuthorizeParams struct { - Target string - ClientCertURI string - ClientCertSerial string -} - -// AgentAuthorize is the response structure for Connect authorization. -type AgentAuthorize struct { - Authorized bool - Reason string -} - -// ConnectProxyConfig is the response structure for agent-local proxy -// configuration. -type ConnectProxyConfig struct { - ProxyServiceID string - TargetServiceID string - TargetServiceName string - ContentHash string - Config map[string]interface{} `bexpr:"-"` - Upstreams []Upstream -} - -// Upstream is the response structure for a proxy upstream configuration. 
-type Upstream struct { - DestinationType UpstreamDestType `json:",omitempty"` - DestinationPartition string `json:",omitempty"` - DestinationNamespace string `json:",omitempty"` - DestinationPeer string `json:",omitempty"` - DestinationName string - Datacenter string `json:",omitempty"` - LocalBindAddress string `json:",omitempty"` - LocalBindPort int `json:",omitempty"` - LocalBindSocketPath string `json:",omitempty"` - LocalBindSocketMode string `json:",omitempty"` - Config map[string]interface{} `json:",omitempty" bexpr:"-"` - MeshGateway MeshGatewayConfig `json:",omitempty"` - CentrallyConfigured bool `json:",omitempty" bexpr:"-"` -} - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Host is used to retrieve information about the host the -// agent is running on such as CPU, memory, and disk. Requires -// a operator:read ACL token. 
-func (a *Agent) Host() (map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/host") - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Version is used to retrieve information about the running Consul version and build. -func (a *Agent) Version() (map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/version") - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Metrics is used to query the agent we are speaking to for -// its current internal metric data -func (a *Agent) Metrics() (*MetricsInfo, error) { - r := a.c.newRequest("GET", "/v1/agent/metrics") - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out *MetricsInfo - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// MetricsStream returns an io.ReadCloser which will emit a stream of metrics -// until the context is cancelled. The metrics are json encoded. -// The caller is responsible for closing the returned io.ReadCloser. -func (a *Agent) MetricsStream(ctx context.Context) (io.ReadCloser, error) { - r := a.c.newRequest("GET", "/v1/agent/metrics/stream") - r.ctx = ctx - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - return resp.Body, nil -} - -// Reload triggers a configuration reload for the agent we are connected to. 
-func (a *Agent) Reload() error { - r := a.c.newRequest("PUT", "/v1/agent/reload") - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// NodeName is used to get the node name of the agent -func (a *Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - return a.ChecksWithFilter("") -} - -// ChecksWithFilter returns a subset of the locally registered checks that match -// the given filter expression -func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) { - return a.ChecksWithFilterOpts(filter, nil) -} - -// ChecksWithFilterOpts returns a subset of the locally registered checks that match -// the given filter expression and QueryOptions. 
-func (a *Agent) ChecksWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - r.setQueryOptions(q) - r.filterQuery(filter) - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - return a.ServicesWithFilter("") -} - -// ServicesWithFilter returns a subset of the locally registered services that match -// the given filter expression -func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { - return a.ServicesWithFilterOpts(filter, nil) -} - -// ServicesWithFilterOpts returns a subset of the locally registered services that match -// the given filter expression and QueryOptions. 
-func (a *Agent) ServicesWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - r.setQueryOptions(q) - r.filterQuery(filter) - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return out, nil -} - -// AgentHealthServiceByID returns for a given serviceID: the aggregated health status, the service definition or an error if any -// - If the service is not found, will return status (critical, nil, nil) -// - If the service is found, will return (critical|passing|warning), AgentServiceChecksInfo, nil) -// - In all other cases, will return an error -func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) { - return a.AgentHealthServiceByIDOpts(serviceID, nil) -} - -func (a *Agent) AgentHealthServiceByIDOpts(serviceID string, q *QueryOptions) (string, *AgentServiceChecksInfo, error) { - path := fmt.Sprintf("/v1/agent/health/service/id/%v", serviceID) - r := a.c.newRequest("GET", path) - r.setQueryOptions(q) - r.params.Add("format", "json") - r.header.Set("Accept", "application/json") - // not a lot of value in wrapping the doRequest call in a requireHttpCodes call - // we manipulate the resp body and the require calls "swallow" the content on err - _, resp, err := a.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - // Service not Found - if resp.StatusCode == http.StatusNotFound { - return HealthCritical, nil, nil - } - var out *AgentServiceChecksInfo - if err := decodeBody(resp, &out); err != nil { - return HealthCritical, out, err - } - switch resp.StatusCode { - case http.StatusOK: - return HealthPassing, out, nil - case http.StatusTooManyRequests: - return HealthWarning, out, nil 
- case http.StatusServiceUnavailable: - return HealthCritical, out, nil - } - return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path) -} - -// AgentHealthServiceByName returns for a given service name: the aggregated health status for all services -// having the specified name. -// - If no service is not found, will return status (critical, [], nil) -// - If the service is found, will return (critical|passing|warning), []api.AgentServiceChecksInfo, nil) -// - In all other cases, will return an error -func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) { - return a.AgentHealthServiceByNameOpts(service, nil) -} - -func (a *Agent) AgentHealthServiceByNameOpts(service string, q *QueryOptions) (string, []AgentServiceChecksInfo, error) { - path := fmt.Sprintf("/v1/agent/health/service/name/%v", service) - r := a.c.newRequest("GET", path) - r.setQueryOptions(q) - r.params.Add("format", "json") - r.header.Set("Accept", "application/json") - // not a lot of value in wrapping the doRequest call in a requireHttpCodes call - // we manipulate the resp body and the require calls "swallow" the content on err - _, resp, err := a.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - // Service not Found - if resp.StatusCode == http.StatusNotFound { - return HealthCritical, nil, nil - } - var out []AgentServiceChecksInfo - if err := decodeBody(resp, &out); err != nil { - return HealthCritical, out, err - } - switch resp.StatusCode { - case http.StatusOK: - return HealthPassing, out, nil - case http.StatusTooManyRequests: - return HealthWarning, out, nil - case http.StatusServiceUnavailable: - return HealthCritical, out, nil - } - return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path) -} - -// Service returns a locally registered service instance and allows for -// hash-based blocking. 
-// -// Note that this uses an unconventional blocking mechanism since it's -// agent-local state. That means there is no persistent raft index so we block -// based on object hash instead. -func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return out, qm, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. -func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// MembersOpts returns the known gossip members and can be passed -// additional options for WAN/segment filtering. 
-func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - r.params.Set("segment", opts.Segment) - if opts.WAN { - r.params.Set("wan", "1") - } - - if opts.Filter != "" { - r.params.Set("filter", opts.Filter) - } - - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - opts := ServiceRegisterOpts{ - ReplaceExistingChecks: false, - } - - return a.serviceRegister(service, opts) -} - -// ServiceRegister is used to register a new service with -// the local agent and can be passed additional options. -func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts ServiceRegisterOpts) error { - return a.serviceRegister(service, opts) -} - -func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - r.ctx = opts.ctx - if opts.ReplaceExistingChecks { - r.params.Set("replace-existing-checks", "true") - } - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// ServiceDeregisterOpts is 
used to deregister a service with -// the local agent with QueryOptions. -func (a *Agent) ServiceDeregisterOpts(serviceID string, q *QueryOptions) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - r.setQueryOptions(q) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// PassTTL is used to set a TTL check to the passing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) PassTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) WarnTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) FailTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "fail") -} - -// updateTTL is used to update the TTL of a check. This is the internal -// method that uses the old API that's present in Consul versions prior to -// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed -// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, -// but keep the old Pass/Warn/Fail methods using the old API under the hood. 
-// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 and the server endpoints will -// be removed in 0.9. -func (a *Agent) updateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// checkUpdate is the payload for a PUT for a check update. -type checkUpdate struct { - // Status is one of the api.Health* states: HealthPassing - // ("passing"), HealthWarning ("warning"), or HealthCritical - // ("critical"). - Status string - - // Output is the information to post to the UI for operators as the - // output of the process that decided to hit the TTL check. This is - // different from the note field that's associated with the check - // itself. - Output string -} - -// UpdateTTL is used to update the TTL of a check. This uses the newer API -// that was introduced in Consul 0.6.4 and later. We translate the old status -// strings for compatibility (though a newer version of Consul will still be -// required to use this API). 
-func (a *Agent) UpdateTTL(checkID, output, status string) error { - return a.UpdateTTLOpts(checkID, output, status, nil) -} - -func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) error { - switch status { - case "pass", HealthPassing: - status = HealthPassing - case "warn", HealthWarning: - status = HealthWarning - case "fail", HealthCritical: - status = HealthCritical - default: - return fmt.Errorf("Invalid status: %s", status) - } - - endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) - r := a.c.newRequest("PUT", endpoint) - r.setQueryOptions(q) - r.obj = &checkUpdate{ - Status: status, - Output: output, - } - - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - return a.CheckDeregisterOpts(checkID, nil) -} - -// CheckDeregisterOpts is used to deregister a check with -// the local agent using query options -func (a *Agent) CheckDeregisterOpts(checkID string, q *QueryOptions) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - r.setQueryOptions(q) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// Join is used to instruct the agent to attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) 
error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// Leave is used to have the agent gracefully leave the cluster and shutdown -func (a *Agent) Leave() error { - r := a.c.newRequest("PUT", "/v1/agent/leave") - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -type ForceLeaveOpts struct { - // Prune indicates if we should remove a failed agent from the list of - // members in addition to ejecting it. - Prune bool - - // WAN indicates that the request should exclusively target the WAN pool. - WAN bool -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - return a.ForceLeaveOpts(node, ForceLeaveOpts{}) -} - -// ForceLeavePrune is used to have an a failed agent removed -// from the list of members -func (a *Agent) ForceLeavePrune(node string) error { - return a.ForceLeaveOpts(node, ForceLeaveOpts{Prune: true}) -} - -// ForceLeaveOpts is used to have the agent eject a failed node or remove it -// completely from the list of members. -// -// DEPRECATED - Use ForceLeaveOptions instead. -func (a *Agent) ForceLeaveOpts(node string, opts ForceLeaveOpts) error { - return a.ForceLeaveOptions(node, opts, nil) -} - -// ForceLeaveOptions is used to have the agent eject a failed node or remove it -// completely from the list of members. 
Allows usage of QueryOptions on-top of ForceLeaveOpts -func (a *Agent) ForceLeaveOptions(node string, opts ForceLeaveOpts, q *QueryOptions) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - r.setQueryOptions(q) - if opts.Prune { - r.params.Set("prune", "1") - } - if opts.WAN { - r.params.Set("wan", "1") - } - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// ConnectAuthorize is used to authorize an incoming connection -// to a natively integrated Connect service. -func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) { - r := a.c.newRequest("POST", "/v1/agent/connect/authorize") - r.obj = auth - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - var out AgentAuthorize - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// ConnectCARoots returns the list of roots. -func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots") - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CARootList - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// ConnectCALeaf gets the leaf certificate for the given service ID. 
-func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) - r.setQueryOptions(q) - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out LeafCert - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. -func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - return a.EnableServiceMaintenanceOpts(serviceID, reason, nil) -} - -func (a *Agent) EnableServiceMaintenanceOpts(serviceID, reason string, q *QueryOptions) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.setQueryOptions(q) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - return a.DisableServiceMaintenanceOpts(serviceID, nil) -} - -func (a *Agent) DisableServiceMaintenanceOpts(serviceID string, q *QueryOptions) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.setQueryOptions(q) - r.params.Set("enable", "false") - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. 
-func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. -func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// Monitor returns a channel which will receive streaming logs from the agent -// Providing a non-nil stopCh can be used to close the connection and stop the -// log stream. An empty string will be sent down the given channel when there's -// nothing left to stream, after which the caller should close the stopCh. -func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { - return a.monitor(loglevel, false, stopCh, q) -} - -// MonitorJSON is like Monitor except it returns logs in JSON format. 
-func (a *Agent) MonitorJSON(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { - return a.monitor(loglevel, true, stopCh, q) -} - -func (a *Agent) monitor(loglevel string, logJSON bool, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { - r := a.c.newRequest("GET", "/v1/agent/monitor") - r.setQueryOptions(q) - if loglevel != "" { - r.params.Add("loglevel", loglevel) - } - if logJSON { - r.params.Set("logjson", "true") - } - _, resp, err := a.c.doRequest(r) - if err != nil { - return nil, err - } - if err := requireOK(resp); err != nil { - return nil, err - } - logCh := make(chan string, 64) - go func() { - defer closeResponseBody(resp) - scanner := bufio.NewScanner(resp.Body) - for { - select { - case <-stopCh: - close(logCh) - return - default: - } - if scanner.Scan() { - // An empty string signals to the caller that - // the scan is done, so make sure we only emit - // that when the scanner says it's done, not if - // we happen to ingest an empty line. - if text := scanner.Text(); text != "" { - logCh <- text - } else { - logCh <- " " - } - } else { - logCh <- "" - } - } - }() - return logCh, nil -} - -// UpdateACLToken updates the agent's "acl_token". See updateToken for more -// details. Deprecated in Consul 1.4. -// -// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above -func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") -} - -// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken -// for more details. Deprecated in Consul 1.4. 
-// -// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above -func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { - return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") -} - -// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See -// updateToken for more details. Deprecated in Consul 1.4. -// -// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above -func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { - return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") -} - -// UpdateACLReplicationToken updates the agent's "acl_replication_token". See -// updateToken for more details. Deprecated in Consul 1.4. -// -// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above -func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { - return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") -} - -// UpdateDefaultACLToken updates the agent's "default" token. See updateToken -// for more details -func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback(token, q, "default", "acl_token") -} - -// UpdateAgentACLToken updates the agent's "agent" token. See updateToken -// for more details -func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback(token, q, "agent", "acl_agent_token") -} - -// UpdateAgentRecoveryACLToken updates the agent's "agent_recovery" token. See updateToken -// for more details. -func (a *Agent) UpdateAgentRecoveryACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback(token, q, "agent_recovery", "agent_master", "acl_agent_master_token") -} - -// UpdateAgentMasterACLToken updates the agent's "agent_master" token. 
See updateToken -// for more details. -// -// DEPRECATED - Prefer UpdateAgentRecoveryACLToken for v1.11 and above. -func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback(token, q, "agent_master", "acl_agent_master_token") -} - -// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken -// for more details -func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback(token, q, "replication", "acl_replication_token") -} - -// UpdateConfigFileRegistrationToken updates the agent's "replication" token. See updateToken -// for more details -func (a *Agent) UpdateConfigFileRegistrationToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("config_file_service_registration", token, q) -} - -// updateToken can be used to update one of an agent's ACL tokens after the agent has -// started. The tokens are may not be persisted, so will need to be updated again if -// the agent is restarted unless the agent is configured to persist them. 
-func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { - meta, _, err := a.updateTokenOnce(target, token, q) - return meta, err -} - -func (a *Agent) updateTokenFallback(token string, q *WriteOptions, targets ...string) (*WriteMeta, error) { - if len(targets) == 0 { - panic("targets must not be empty") - } - - var ( - meta *WriteMeta - err error - ) - for _, target := range targets { - var status int - meta, status, err = a.updateTokenOnce(target, token, q) - if err == nil && status != http.StatusNotFound { - return meta, err - } - } - return meta, err -} - -func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) { - r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) - r.setWriteOptions(q) - r.obj = &AgentToken{Token: token} - - rtt, resp, err := a.c.doRequest(r) - if err != nil { - return nil, 500, err - } - defer closeResponseBody(resp) - wm := &WriteMeta{RequestTime: rtt} - if err := requireOK(resp); err != nil { - var statusE StatusError - if errors.As(err, &statusE) { - return wm, statusE.Code, statusE - } - return nil, 0, err - } - return wm, resp.StatusCode, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index f62c0c5a1bf..00000000000 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,1277 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-rootcerts" -) - -const ( - // HTTPAddrEnvName defines an environment variable name which sets - // the HTTP address if there is no -http-addr specified. 
- HTTPAddrEnvName = "CONSUL_HTTP_ADDR" - - // HTTPTokenEnvName defines an environment variable name which sets - // the HTTP token. - HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" - - // HTTPTokenFileEnvName defines an environment variable name which sets - // the HTTP token file. - HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE" - - // HTTPAuthEnvName defines an environment variable name which sets - // the HTTP authentication header. - HTTPAuthEnvName = "CONSUL_HTTP_AUTH" - - // HTTPSSLEnvName defines an environment variable name which sets - // whether or not to use HTTPS. - HTTPSSLEnvName = "CONSUL_HTTP_SSL" - - // HTTPCAFile defines an environment variable name which sets the - // CA file to use for talking to Consul over TLS. - HTTPCAFile = "CONSUL_CACERT" - - // HTTPCAPath defines an environment variable name which sets the - // path to a directory of CA certs to use for talking to Consul over TLS. - HTTPCAPath = "CONSUL_CAPATH" - - // HTTPClientCert defines an environment variable name which sets the - // client cert file to use for talking to Consul over TLS. - HTTPClientCert = "CONSUL_CLIENT_CERT" - - // HTTPClientKey defines an environment variable name which sets the - // client key file to use for talking to Consul over TLS. - HTTPClientKey = "CONSUL_CLIENT_KEY" - - // HTTPTLSServerName defines an environment variable name which sets the - // server name to use as the SNI host when connecting via TLS - HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" - - // HTTPSSLVerifyEnvName defines an environment variable name which sets - // whether or not to disable certificate checking. - HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" - - // GRPCAddrEnvName defines an environment variable name which sets the gRPC - // address for consul connect envoy. Note this isn't actually used by the api - // client in this package but is defined here for consistency with all the - // other ENV names we use. 
- GRPCAddrEnvName = "CONSUL_GRPC_ADDR" - - // GRPCCAFileEnvName defines an environment variable name which sets the - // CA file to use for talking to Consul gRPC over TLS. - GRPCCAFileEnvName = "CONSUL_GRPC_CACERT" - - // GRPCCAPathEnvName defines an environment variable name which sets the - // path to a directory of CA certs to use for talking to Consul gRPC over TLS. - GRPCCAPathEnvName = "CONSUL_GRPC_CAPATH" - - // HTTPNamespaceEnvVar defines an environment variable name which sets - // the HTTP Namespace to be used by default. This can still be overridden. - HTTPNamespaceEnvName = "CONSUL_NAMESPACE" - - // HTTPPartitionEnvName defines an environment variable name which sets - // the HTTP Partition to be used by default. This can still be overridden. - HTTPPartitionEnvName = "CONSUL_PARTITION" - - // QueryBackendStreaming Query backend of type streaming - QueryBackendStreaming = "streaming" - - // QueryBackendBlockingQuery Query backend of type blocking query - QueryBackendBlockingQuery = "blocking-query" -) - -type StatusError struct { - Code int - Body string -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Unexpected response code: %d (%s)", e.Code, e.Body) -} - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Namespace overrides the `default` namespace - // Note: Namespaces are available only in Consul Enterprise - Namespace string - - // Partition overrides the `default` partition - // Note: Partitions are available only in Consul Enterprise - Partition string - - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Providing a peer name in the query option - Peer string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. 
- RequireConsistent bool - - // UseCache requests that the agent cache results locally. See - // https://www.consul.io/api/features/caching.html for more details on the - // semantics. - UseCache bool - - // MaxAge limits how old a cached value will be returned if UseCache is true. - // If there is a cached response that is older than the MaxAge, it is treated - // as a cache miss and a new fetch invoked. If the fetch fails, the error is - // returned. Clients that wish to allow for stale results on error can set - // StaleIfError to a longer duration to change this behavior. It is ignored - // if the endpoint supports background refresh caching. See - // https://www.consul.io/api/features/caching.html for more details. - MaxAge time.Duration - - // StaleIfError specifies how stale the client will accept a cached response - // if the servers are unavailable to fetch a fresh one. Only makes sense when - // UseCache is true and MaxAge is set to a lower, non-zero value. It is - // ignored if the endpoint supports background refresh caching. See - // https://www.consul.io/api/features/caching.html for more details. - StaleIfError time.Duration - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitHash is used by some endpoints instead of WaitIndex to perform blocking - // on state based on a hash of the response rather than a monotonic index. - // This is required when the state being blocked on is not stored in Raft, for - // example agent-local proxy configuration. - WaitHash string - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. 
- Token string - - // Near is used to provide a node name that will sort the results - // in ascending order based on the estimated round trip time from - // that node. Setting this to "_agent" will use the agent's node - // for the sort. - Near string - - // NodeMeta is used to filter results by nodes with the given - // metadata key/value pairs. Currently, only one key/value pair can - // be provided for filtering. - NodeMeta map[string]string - - // RelayFactor is used in keyring operations to cause responses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // LocalOnly is used in keyring list operation to force the keyring - // query to only hit local servers (no WAN traffic). - LocalOnly bool - - // Connect filters prepared query execution to only include Connect-capable - // services. This currently affects prepared query execution. - Connect bool - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context - - // Filter requests filtering data prior to it being returned. The string - // is a go-bexpr compatible expression. - Filter string - - // MergeCentralConfig returns a service definition merged with the - // proxy-defaults/global and service-defaults/:service config entries. - // This can be used to ensure a full service definition is returned in the response - // especially when the service might not be written into the catalog that way. - MergeCentralConfig bool - - // Global is used to request information from all datacenters. Currently only - // used for operator usage requests. 
- Global bool -} - -func (o *QueryOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { - o2 := new(QueryOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// WriteOptions are used to parameterize a write -type WriteOptions struct { - // Namespace overrides the `default` namespace - // Note: Namespaces are available only in Consul Enterprise - Namespace string - - // Partition overrides the `default` partition - // Note: Partitions are available only in Consul Enterprise - Partition string - - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // RelayFactor is used in keyring operations to cause responses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -func (o *WriteOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { - o2 := new(WriteOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex. This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // LastContentHash. This can be used as a WaitHash to perform a blocking query - // for endpoints that support hash-based blocking. Endpoints that do not - // support it will return an empty hash. 
- LastContentHash string - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration - - // Is address translation enabled for HTTP responses on this agent - AddressTranslationEnabled bool - - // CacheHit is true if the result was served from agent-local cache. - CacheHit bool - - // CacheAge is set if request was ?cached and indicates how stale the cached - // response is. - CacheAge time.Duration - - // QueryBackend represent which backend served the request. - QueryBackend string - - // DefaultACLPolicy is used to control the ACL interaction when there is no - // defined policy. This can be "allow" which means ACLs are used to - // deny-list, or "deny" which means ACLs are allow-lists. - DefaultACLPolicy string - - // ResultsFilteredByACLs is true when some of the query's results were - // filtered out by enforcing ACLs. It may be false because nothing was - // removed, or because the endpoint does not yet support this flag. - ResultsFilteredByACLs bool -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Consul server - Address string - - // Scheme is the URI scheme for the Consul server - Scheme string - - // Prefix for URIs for when consul is behind an API gateway (reverse - // proxy). The API gateway must strip off the PathPrefix before - // passing the request onto consul. - PathPrefix string - - // Datacenter to use. 
If not provided, the default agent datacenter is used. - Datacenter string - - // Transport is the Transport to use for the http client. - Transport *http.Transport - - // HttpClient is the client to use. Default will be - // used if not provided. - HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // TokenFile is a file containing the current token to use for this client. - // If provided it is read once at startup and never again. - TokenFile string - - // Namespace is the name of the namespace to send along for the request - // when no other Namespace is present in the QueryOptions - Namespace string - - // Partition is the name of the partition to send along for the request - // when no other Partition is present in the QueryOptions - Partition string - - TLSConfig TLSConfig -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -type TLSConfig struct { - // Address is the optional address of the Consul server. The port, if any - // will be removed from here and this will be set to the ServerName of the - // resulting config. - Address string - - // CAFile is the optional path to the CA certificate used for Consul - // communication, defaults to the system bundle if not specified. - CAFile string - - // CAPath is the optional path to a directory of CA certificates to use for - // Consul communication, defaults to the system bundle if not specified. - CAPath string - - // CAPem is the optional PEM-encoded CA certificate used for Consul - // communication, defaults to the system bundle if not specified. 
- CAPem []byte - - // CertFile is the optional path to the certificate for Consul - // communication. If this is set then you need to also set KeyFile. - CertFile string - - // CertPEM is the optional PEM-encoded certificate for Consul - // communication. If this is set then you need to also set KeyPEM. - CertPEM []byte - - // KeyFile is the optional path to the private key for Consul communication. - // If this is set then you need to also set CertFile. - KeyFile string - - // KeyPEM is the optional PEM-encoded private key for Consul communication. - // If this is set then you need to also set CertPEM. - KeyPEM []byte - - // InsecureSkipVerify if set to true will disable TLS host verification. - InsecureSkipVerify bool -} - -// DefaultConfig returns a default configuration for the client. By default this -// will pool and reuse idle connections to Consul. If you have a long-lived -// client object, this is the desired behavior and should make the most efficient -// use of the connections to Consul. If you don't reuse a client object, which -// is not recommended, then you may notice idle connections building up over -// time. To avoid this, use the DefaultNonPooledConfig() instead. -func DefaultConfig() *Config { - return defaultConfig(nil, cleanhttp.DefaultPooledTransport) -} - -// DefaultConfigWithLogger returns a default configuration for the client. It -// is exactly the same as DefaultConfig, but allows for a pre-configured logger -// object to be passed through. -func DefaultConfigWithLogger(logger hclog.Logger) *Config { - return defaultConfig(logger, cleanhttp.DefaultPooledTransport) -} - -// DefaultNonPooledConfig returns a default configuration for the client which -// does not pool connections. This isn't a recommended configuration because it -// will reconnect to Consul on every request, but this is useful to avoid the -// accumulation of idle connections if you make many client objects during the -// lifetime of your application. 
-func DefaultNonPooledConfig() *Config { - return defaultConfig(nil, cleanhttp.DefaultTransport) -} - -// defaultConfig returns the default configuration for the client, using the -// given function to make the transport. -func defaultConfig(logger hclog.Logger, transportFn func() *http.Transport) *Config { - if logger == nil { - logger = hclog.New(&hclog.LoggerOptions{ - Name: "consul-api", - }) - } - - config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - Transport: transportFn(), - } - - if addr := os.Getenv(HTTPAddrEnvName); addr != "" { - config.Address = addr - } - - if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" { - config.TokenFile = tokenFile - } - - if token := os.Getenv(HTTPTokenEnvName); token != "" { - config.Token = token - } - - if auth := os.Getenv(HTTPAuthEnvName); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { - enabled, err := strconv.ParseBool(ssl) - if err != nil { - logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLEnvName), "error", err) - } - - if enabled { - config.Scheme = "https" - } - } - - if v := os.Getenv(HTTPTLSServerName); v != "" { - config.TLSConfig.Address = v - } - if v := os.Getenv(HTTPCAFile); v != "" { - config.TLSConfig.CAFile = v - } - if v := os.Getenv(HTTPCAPath); v != "" { - config.TLSConfig.CAPath = v - } - if v := os.Getenv(HTTPClientCert); v != "" { - config.TLSConfig.CertFile = v - } - if v := os.Getenv(HTTPClientKey); v != "" { - config.TLSConfig.KeyFile = v - } - if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { - doVerify, err := strconv.ParseBool(v) - if err != nil { - logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLVerifyEnvName), "error", err) - } - if !doVerify { - 
config.TLSConfig.InsecureSkipVerify = true - } - } - - if v := os.Getenv(HTTPNamespaceEnvName); v != "" { - config.Namespace = v - } - - if v := os.Getenv(HTTPPartitionEnvName); v != "" { - config.Partition = v - } - - return config -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { - tlsClientConfig := &tls.Config{ - InsecureSkipVerify: tlsConfig.InsecureSkipVerify, - } - - if tlsConfig.Address != "" { - server := tlsConfig.Address - hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") - if hasPort { - var err error - server, _, err = net.SplitHostPort(server) - if err != nil { - return nil, err - } - } - tlsClientConfig.ServerName = server - } - - if len(tlsConfig.CertPEM) != 0 && len(tlsConfig.KeyPEM) != 0 { - tlsCert, err := tls.X509KeyPair(tlsConfig.CertPEM, tlsConfig.KeyPEM) - if err != nil { - return nil, err - } - tlsClientConfig.Certificates = []tls.Certificate{tlsCert} - } else if len(tlsConfig.CertPEM) != 0 || len(tlsConfig.KeyPEM) != 0 { - return nil, fmt.Errorf("both client cert and client key must be provided") - } - - if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) - if err != nil { - return nil, err - } - tlsClientConfig.Certificates = []tls.Certificate{tlsCert} - } else if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" { - return nil, fmt.Errorf("both client cert and client key must be provided") - } - - if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" || len(tlsConfig.CAPem) != 0 { - rootConfig := &rootcerts.Config{ - CAFile: tlsConfig.CAFile, - CAPath: tlsConfig.CAPath, - CACertificate: tlsConfig.CAPem, - } - if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { - return nil, err - } - } - - return tlsClientConfig, nil -} - -func (c *Config) GenerateEnv() []string { - env := make([]string, 
0, 10) - - env = append(env, - fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), - fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), - fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile), - fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), - fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), - fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), - fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile), - fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile), - fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address), - fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify)) - - if c.HttpAuth != nil { - env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password)) - } else { - env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName)) - } - - return env -} - -// Client provides a client to the Consul API -type Client struct { - modifyLock sync.RWMutex - headers http.Header - - config Config -} - -// Headers gets the current set of headers used for requests. This returns a -// copy; to modify it call AddHeader or SetHeaders. -func (c *Client) Headers() http.Header { - c.modifyLock.RLock() - defer c.modifyLock.RUnlock() - - if c.headers == nil { - return nil - } - - ret := make(http.Header) - for k, v := range c.headers { - for _, val := range v { - ret[k] = append(ret[k], val) - } - } - - return ret -} - -// AddHeader allows a single header key/value pair to be added -// in a race-safe fashion. -func (c *Client) AddHeader(key, value string) { - c.modifyLock.Lock() - defer c.modifyLock.Unlock() - c.headers.Add(key, value) -} - -// SetHeaders clears all previous headers and uses only the given -// ones going forward. 
-func (c *Client) SetHeaders(headers http.Header) { - c.modifyLock.Lock() - defer c.modifyLock.Unlock() - c.headers = headers -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if config.Address == "" { - config.Address = defConfig.Address - } - - if config.Scheme == "" { - config.Scheme = defConfig.Scheme - } - - if config.Transport == nil { - config.Transport = defConfig.Transport - } - - if config.TLSConfig.Address == "" { - config.TLSConfig.Address = defConfig.TLSConfig.Address - } - - if config.TLSConfig.CAFile == "" { - config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile - } - - if config.TLSConfig.CAPath == "" { - config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath - } - - if config.TLSConfig.CertFile == "" { - config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile - } - - if config.TLSConfig.KeyFile == "" { - config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile - } - - if !config.TLSConfig.InsecureSkipVerify { - config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify - } - - if config.HttpClient == nil { - var err error - config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) - if err != nil { - return nil, err - } - } - - if config.Namespace == "" { - config.Namespace = defConfig.Namespace - } - - if config.Partition == "" { - config.Partition = defConfig.Partition - } - - parts := strings.SplitN(config.Address, "://", 2) - if len(parts) == 2 { - switch parts[0] { - case "http": - // Never revert to http if TLS was explicitly requested. 
- case "https": - config.Scheme = "https" - case "unix": - trans := cleanhttp.DefaultTransport() - trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - } - httpClient, err := NewHttpClient(trans, config.TLSConfig) - if err != nil { - return nil, err - } - config.HttpClient = httpClient - default: - return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) - } - config.Address = parts[1] - - // separate out a reverse proxy prefix, if it is present. - // NOTE: Rewriting this code to use url.Parse() instead of - // strings.SplitN() breaks existing test cases. - switch parts[0] { - case "http", "https": - parts := strings.SplitN(parts[1], "/", 2) - if len(parts) == 2 { - config.Address = parts[0] - config.PathPrefix = "/" + parts[1] - } - } - } - - // If the TokenFile is set, always use that, even if a Token is configured. - // This is because when TokenFile is set it is read into the Token field. - // We want any derived clients to have to re-read the token file. - // The precedence of ACL token should be: - // 1. -token-file cli option - // 2. -token cli option - // 3. CONSUL_HTTP_TOKEN_FILE environment variable - // 4. 
CONSUL_HTTP_TOKEN environment variable - if config.TokenFile != "" && config.TokenFile != defConfig.TokenFile { - data, err := os.ReadFile(config.TokenFile) - if err != nil { - return nil, fmt.Errorf("Error loading token file %s : %s", config.TokenFile, err) - } - - if token := strings.TrimSpace(string(data)); token != "" { - config.Token = token - } - } else if config.Token != "" && defConfig.Token != config.Token { - // Fall through - } else if defConfig.TokenFile != "" { - data, err := os.ReadFile(defConfig.TokenFile) - if err != nil { - return nil, fmt.Errorf("Error loading token file %s : %s", defConfig.TokenFile, err) - } - - if token := strings.TrimSpace(string(data)); token != "" { - config.Token = token - config.TokenFile = defConfig.TokenFile - } - } else { - config.Token = defConfig.Token - } - return &Client{config: *config, headers: make(http.Header)}, nil -} - -// NewHttpClient returns an http client configured with the given Transport and TLS -// config. -func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { - client := &http.Client{ - Transport: transport, - } - - // TODO (slackpad) - Once we get some run time on the HTTP/2 support we - // should turn it on by default if TLS is enabled. We would basically - // just need to call http2.ConfigureTransport(transport) here. We also - // don't want to introduce another external dependency on - // golang.org/x/net/http2 at this time. For a complete recipe for how - // to enable HTTP/2 support on a transport suitable for the API client - // library see agent/http_test.go:TestHTTPServer_H2. 
- - if transport.TLSClientConfig == nil { - tlsClientConfig, err := SetupTLSConfig(&tlsConf) - - if err != nil { - return nil, err - } - - transport.TLSClientConfig = tlsClientConfig - } - - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - header http.Header - obj interface{} - ctx context.Context -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Namespace != "" { - // For backwards-compatibility with existing tests, - // use the short-hand query param name "ns" - // rather than the alternative long-hand "namespace" - r.params.Set("ns", q.Namespace) - } - if q.Partition != "" { - // For backwards-compatibility with existing tests, - // use the long-hand query param name "partition" - // rather than the alternative short-hand "ap" - r.params.Set("partition", q.Partition) - } - if q.Datacenter != "" { - // For backwards-compatibility with existing tests, - // use the short-hand query param name "dc" - // rather than the alternative long-hand "datacenter" - r.params.Set("dc", q.Datacenter) - } - if q.Peer != "" { - r.params.Set("peer", q.Peer) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.WaitHash != "" { - r.params.Set("hash", q.WaitHash) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.Near != "" { - r.params.Set("near", q.Near) - } - if q.Filter != "" { - r.params.Set("filter", q.Filter) - } - if len(q.NodeMeta) > 0 { - for key, value := range q.NodeMeta { - r.params.Add("node-meta", key+":"+value) - } - } - if q.RelayFactor != 0 { - 
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - if q.LocalOnly { - r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly)) - } - if q.Connect { - r.params.Set("connect", "true") - } - if q.UseCache && !q.RequireConsistent { - r.params.Set("cached", "") - - cc := []string{} - if q.MaxAge > 0 { - cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds())) - } - if q.StaleIfError > 0 { - cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds())) - } - if len(cc) > 0 { - r.header.Set("Cache-Control", strings.Join(cc, ", ")) - } - } - if q.MergeCentralConfig { - r.params.Set("merge-central-config", "") - } - if q.Global { - r.params.Set("global", "") - } - - r.ctx = q.ctx -} - -// durToMsec converts a duration to a millisecond specified string. If the -// user selected a positive value that rounds to 0 ms, then we will use 1 ms -// so they get a short delay, otherwise Consul will translate the 0 ms into -// a huge default delay. -func durToMsec(dur time.Duration) string { - ms := dur / time.Millisecond - if dur > 0 && ms == 0 { - ms = 1 - } - return fmt.Sprintf("%dms", ms) -} - -// serverError is a string we look for to detect 500 errors. -const serverError = "Unexpected response code: 500" - -// IsRetryableError returns true for 500 errors from the Consul servers, and -// network connection errors. These are usually retryable at a later time. -// This applies to reads but NOT to writes. This may return true for errors -// on writes that may have still gone through, so do not use this to retry -// any write operations. -func IsRetryableError(err error) bool { - if err == nil { - return false - } - - if _, ok := err.(net.Error); ok { - return true - } - - // TODO (slackpad) - Make a real error type here instead of using - // a string check. 
- return strings.Contains(err.Error(), serverError) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - // For backwards-compatibility, continue to use the shorthand "ns" - // rather than "namespace" - if q.Namespace != "" { - r.params.Set("ns", q.Namespace) - } - if q.Partition != "" { - r.params.Set("partition", q.Partition) - } - // For backwards-compatibility, continue to use the shorthand "dc" - // rather than "datacenter" - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - r.ctx = q.ctx -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - b, err := encodeBody(r.obj) - if err != nil { - return nil, err - } - r.body = b - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - // validate that socket communications that do not use the host, detect - // slashes in the host name and replace it with local host. - // this is required since go started validating req.host in 1.20.6 and 1.19.11. - // prior to that they would strip out the slashes for you. They removed that - // behavior and added more strict validation as part of a CVE. - // This issue is being tracked by the Go team: - // https://github.com/golang/go/issues/61431 - // If there is a resolution in this issue, we will remove this code. - // In the time being, this is the accepted workaround. 
- if strings.HasPrefix(r.url.Host, "/") { - r.url.Host = "localhost" - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - req.Header = r.header - - // Content-Type must always be set when a body is present - // See https://github.com/hashicorp/consul/issues/10011 - if req.Body != nil && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "application/json") - } - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - if r.ctx != nil { - return req.WithContext(r.ctx), nil - } - - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: c.config.PathPrefix + path, - }, - params: make(map[string][]string), - header: c.Headers(), - } - - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.Namespace != "" { - r.params.Set("ns", c.config.Namespace) - } - if c.config.Partition != "" { - r.params.Set("partition", c.config.Partition) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.header.Set("X-Consul-Token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Since(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. 
-func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. -func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } else if _, err := io.ReadAll(resp.Body); err != nil { - return nil, err - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -// -// TODO(rb): bug? 
the error from this function is never handled -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index (if it's set - hash based blocking queries don't - // set this) - if indexStr := header.Get("X-Consul-Index"); indexStr != "" { - index, err := strconv.ParseUint(indexStr, 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - } - q.LastContentHash = header.Get("X-Consul-ContentHash") - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - - // Parse X-Consul-Translate-Addresses - switch header.Get("X-Consul-Translate-Addresses") { - case "true": - q.AddressTranslationEnabled = true - default: - q.AddressTranslationEnabled = false - } - - // Parse X-Consul-Default-ACL-Policy - switch v := header.Get("X-Consul-Default-ACL-Policy"); v { - case "allow", "deny": - q.DefaultACLPolicy = v - } - - // Parse the X-Consul-Results-Filtered-By-ACLs - switch header.Get("X-Consul-Results-Filtered-By-ACLs") { - case "true": - q.ResultsFilteredByACLs = true - default: - q.ResultsFilteredByACLs = false - } - - // Parse Cache info - if cacheStr := header.Get("X-Cache"); cacheStr != "" { - q.CacheHit = strings.EqualFold(cacheStr, "HIT") - } - if ageStr := header.Get("Age"); ageStr != "" { - age, err := strconv.ParseUint(ageStr, 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse Age Header: %v", err) - } - q.CacheAge = time.Duration(age) * time.Second - } - - switch v := header.Get("X-Consul-Query-Backend"); v { - case QueryBackendStreaming, QueryBackendBlockingQuery: - q.QueryBackend = 
v - } - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(resp *http.Response) error { - return requireHttpCodes(resp, 200) -} - -// requireHttpCodes checks for the "allowable" http codes for a response -func requireHttpCodes(resp *http.Response, httpCodes ...int) error { - // if there is an http code that we require, return w no error - for _, httpCode := range httpCodes { - if resp.StatusCode == httpCode { - return nil - } - } - - // if we reached here, then none of the http codes in resp matched any that we expected - // so err out - return generateUnexpectedResponseCodeError(resp) -} - -// closeResponseBody reads resp.Body until EOF, and then closes it. The read -// is necessary to ensure that the http.Client's underlying RoundTripper is able -// to re-use the TCP connection. See godoc on net/http.Client.Do. -func closeResponseBody(resp *http.Response) error { - _, _ = io.Copy(io.Discard, resp.Body) - return resp.Body.Close() -} - -func (req *request) filterQuery(filter string) { - if filter == "" { - return - } - - req.params.Set("filter", filter) -} - -// generateUnexpectedResponseCodeError consumes the rest of the body, closes -// the body stream and generates an error indicating the status code was -// unexpected. 
-func generateUnexpectedResponseCodeError(resp *http.Response) error { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - closeResponseBody(resp) - - trimmed := strings.TrimSpace(string(buf.Bytes())) - return StatusError{Code: resp.StatusCode, Body: trimmed} -} - -func requireNotFoundOrOK(resp *http.Response) (bool, *http.Response, error) { - switch resp.StatusCode { - case 200: - return true, resp, nil - case 404: - return false, resp, nil - default: - return false, nil, generateUnexpectedResponseCodeError(resp) - } -} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index 0040ca6e7a8..00000000000 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "net" - "strconv" -) - -type Weights struct { - Passing int - Warning int -} - -type Node struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - Meta map[string]string - CreateIndex uint64 - ModifyIndex uint64 - Partition string `json:",omitempty"` - PeerName string `json:",omitempty"` - Locality *Locality `json:",omitempty"` -} - -type ServiceAddress struct { - Address string - Port int -} - -type CatalogService struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - NodeMeta map[string]string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTaggedAddresses map[string]ServiceAddress - ServiceTags []string - ServiceMeta map[string]string - ServicePort int - ServiceWeights Weights - ServiceEnableTagOverride bool - ServiceProxy *AgentServiceConnectProxyConfig - ServiceLocality *Locality `json:",omitempty"` - CreateIndex uint64 - Checks HealthChecks - ModifyIndex uint64 - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` -} - -type CatalogNode 
struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogNodeServiceList struct { - Node *Node - Services []*AgentService -} - -type CatalogRegistration struct { - ID string - Node string - Address string - TaggedAddresses map[string]string - NodeMeta map[string]string - Datacenter string - Service *AgentService - Check *AgentCheck - Checks HealthChecks - SkipNodeUpdate bool - Partition string `json:",omitempty"` - Locality *Locality `json:",omitempty"` -} - -type CatalogDeregistration struct { - Node string - Address string `json:",omitempty"` // Obsolete. - Datacenter string - ServiceID string - CheckID string - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` -} - -type CompoundServiceName struct { - Name string - - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - // Partitions are a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -// GatewayService associates a gateway with a linked service. -// It also contains service-specific gateway configuration like ingress listener port and protocol. 
-type GatewayService struct { - Gateway CompoundServiceName - Service CompoundServiceName - GatewayKind ServiceKind - Port int `json:",omitempty"` - Protocol string `json:",omitempty"` - Hosts []string `json:",omitempty"` - CAFile string `json:",omitempty"` - CertFile string `json:",omitempty"` - KeyFile string `json:",omitempty"` - SNI string `json:",omitempty"` - FromWildcard bool `json:",omitempty"` -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func 
(c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - var tags []string - if tag != "" { - tags = []string{tag} - } - return c.service(service, tags, q, false) -} - -// Supports multiple tags for filtering -func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - return c.service(service, tags, q, false) -} - -// Connect is used to query catalog entries for a given Connect-enabled service -func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - var tags []string - if tag != "" { - tags = []string{tag} - } - return c.service(service, tags, q, true) -} - -// Supports multiple tags for filtering -func (c *Catalog) 
ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - return c.service(service, tags, q, true) -} - -func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) { - path := "/v1/catalog/service/" + service - if connect { - path = "/v1/catalog/connect/" + service - } - r := c.c.newRequest("GET", path) - r.setQueryOptions(q) - if len(tags) > 0 { - for _, tag := range tags { - r.params.Add("tag", tag) - } - } - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNode - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// NodeServiceList is used to query for service information about a single node. It differs from -// the Node function only in its return type which will contain a list of services as opposed to -// a map of service ids to services. This different structure allows for using the wildcard specifier -// '*' for the Namespace in the QueryOptions. 
-func (c *Catalog) NodeServiceList(node string, q *QueryOptions) (*CatalogNodeServiceList, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node-services/"+node) - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNodeServiceList - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// GatewayServices is used to query the services associated with an ingress gateway or terminating gateway. -func (c *Catalog) GatewayServices(gateway string, q *QueryOptions) ([]*GatewayService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/gateway-services/"+gateway) - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*GatewayService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -func ParseServiceAddr(addrPort string) (ServiceAddress, error) { - port := 0 - host, portStr, err := net.SplitHostPort(addrPort) - if err == nil { - port, err = strconv.Atoi(portStr) - } - return ServiceAddress{Address: host, Port: port}, err -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go deleted file mode 100644 index 405e92ef274..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" - "time" - - "github.com/mitchellh/mapstructure" -) - -const ( - ServiceDefaults string = "service-defaults" - ProxyDefaults string = "proxy-defaults" - ServiceRouter string = "service-router" - ServiceSplitter string = "service-splitter" - ServiceResolver string = "service-resolver" - IngressGateway string = "ingress-gateway" - TerminatingGateway string = "terminating-gateway" - ServiceIntentions string = "service-intentions" - MeshConfig string = "mesh" - ExportedServices string = "exported-services" - SamenessGroup string = "sameness-group" - RateLimitIPConfig string = "control-plane-request-limit" - - ProxyConfigGlobal string = "global" - MeshConfigMesh string = "mesh" - APIGateway string = "api-gateway" - TCPRoute string = "tcp-route" - InlineCertificate string = "inline-certificate" - HTTPRoute string = "http-route" - JWTProvider string = "jwt-provider" -) - -const ( - BuiltinAWSLambdaExtension string = "builtin/aws/lambda" - BuiltinExtAuthzExtension string = "builtin/ext-authz" - BuiltinLuaExtension string = "builtin/lua" - BuiltinPropertyOverrideExtension string = "builtin/property-override" - BuiltinWasmExtension string = "builtin/wasm" - // BuiltinValidateExtension should not be exposed directly or accepted as a valid configured - // extension type, as it is only used indirectly via troubleshooting tools. It is included here - // for common reference alongside other builtin extensions. 
- BuiltinValidateExtension string = "builtin/proxy/validate" -) - -type ConfigEntry interface { - GetKind() string - GetName() string - GetPartition() string - GetNamespace() string - GetMeta() map[string]string - GetCreateIndex() uint64 - GetModifyIndex() uint64 -} - -type MeshGatewayMode string - -const ( - // MeshGatewayModeDefault represents no specific mode and should - // be used to indicate that a different layer of the configuration - // chain should take precedence - MeshGatewayModeDefault MeshGatewayMode = "" - - // MeshGatewayModeNone represents that the Upstream Connect connections - // should be direct and not flow through a mesh gateway. - MeshGatewayModeNone MeshGatewayMode = "none" - - // MeshGatewayModeLocal represents that the Upstream Connect connections - // should be made to a mesh gateway in the local datacenter. - MeshGatewayModeLocal MeshGatewayMode = "local" - - // MeshGatewayModeRemote represents that the Upstream Connect connections - // should be made to a mesh gateway in a remote datacenter. - MeshGatewayModeRemote MeshGatewayMode = "remote" -) - -// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect -// services -type MeshGatewayConfig struct { - // Mode is the mode that should be used for the upstream connection. - Mode MeshGatewayMode `json:",omitempty"` -} - -type ProxyMode string - -const ( - // ProxyModeDefault represents no specific mode and should - // be used to indicate that a different layer of the configuration - // chain should take precedence - ProxyModeDefault ProxyMode = "" - - // ProxyModeTransparent represents that inbound and outbound application - // traffic is being captured and redirected through the proxy. - ProxyModeTransparent ProxyMode = "transparent" - - // ProxyModeDirect represents that the proxy's listeners must be dialed directly - // by the local application and other proxies. 
- ProxyModeDirect ProxyMode = "direct" -) - -type TransparentProxyConfig struct { - // The port of the listener where outbound application traffic is being redirected to. - OutboundListenerPort int `json:",omitempty" alias:"outbound_listener_port"` - - // DialedDirectly indicates whether transparent proxies can dial this proxy instance directly. - // The discovery chain is not considered when dialing a service instance directly. - // This setting is useful when addressing stateful services, such as a database cluster with a leader node. - DialedDirectly bool `json:",omitempty" alias:"dialed_directly"` -} - -type MutualTLSMode string - -const ( - // MutualTLSModeDefault represents no specific mode and should - // be used to indicate that a different layer of the configuration - // chain should take precedence. - MutualTLSModeDefault MutualTLSMode = "" - - // MutualTLSModeStrict requires mTLS for incoming traffic. - MutualTLSModeStrict MutualTLSMode = "strict" - - // MutualTLSModePermissive allows incoming non-mTLS traffic. - MutualTLSModePermissive MutualTLSMode = "permissive" -) - -// ExposeConfig describes HTTP paths to expose through Envoy outside of Connect. -// Users can expose individual paths and/or all HTTP/GRPC paths for checks. -type ExposeConfig struct { - // Checks defines whether paths associated with Consul checks will be exposed. - // This flag triggers exposing all HTTP and GRPC check paths registered for the service. - Checks bool `json:",omitempty"` - - // Paths is the list of paths exposed through the proxy. - Paths []ExposePath `json:",omitempty"` -} - -// EnvoyExtension has configuration for an extension that patches Envoy resources. -type EnvoyExtension struct { - Name string - Required bool - Arguments map[string]interface{} `bexpr:"-"` - ConsulVersion string - EnvoyVersion string -} - -type ExposePath struct { - // ListenerPort defines the port of the proxy's listener for exposed paths. 
- ListenerPort int `json:",omitempty" alias:"listener_port"` - - // Path is the path to expose through the proxy, ie. "/metrics." - Path string `json:",omitempty"` - - // LocalPathPort is the port that the service is listening on for the given path. - LocalPathPort int `json:",omitempty" alias:"local_path_port"` - - // Protocol describes the upstream's service protocol. - // Valid values are "http" and "http2", defaults to "http" - Protocol string `json:",omitempty"` - - // ParsedFromCheck is set if this path was parsed from a registered check - ParsedFromCheck bool -} - -type LogSinkType string - -const ( - DefaultLogSinkType LogSinkType = "" - FileLogSinkType LogSinkType = "file" - StdErrLogSinkType LogSinkType = "stderr" - StdOutLogSinkType LogSinkType = "stdout" -) - -// AccessLogsConfig contains the associated default settings for all Envoy instances within the datacenter or partition -type AccessLogsConfig struct { - // Enabled turns off all access logging - Enabled bool `json:",omitempty" alias:"enabled"` - - // DisableListenerLogs turns off just listener logs for connections rejected by Envoy because they don't - // have a matching listener filter. - DisableListenerLogs bool `json:",omitempty" alias:"disable_listener_logs"` - - // Type selects the output for logs: "file", "stderr". "stdout" - Type LogSinkType `json:",omitempty" alias:"type"` - - // Path is the output file to write logs - Path string `json:",omitempty" alias:"path"` - - // The presence of one format string or the other implies the access log string encoding. - // Defining Both is invalid. - JSONFormat string `json:",omitempty" alias:"json_format"` - TextFormat string `json:",omitempty" alias:"text_format"` -} - -type UpstreamConfiguration struct { - // Overrides is a slice of per-service configuration. The name field is - // required. - Overrides []*UpstreamConfig `json:",omitempty"` - - // Defaults contains default configuration for all upstreams of a given - // service. 
The name field must be empty. - Defaults *UpstreamConfig `json:",omitempty"` -} - -type UpstreamConfig struct { - // Name is only accepted within service-defaults.upstreamConfig.overrides . - Name string `json:",omitempty"` - - // Partition is only accepted within service-defaults.upstreamConfig.overrides . - Partition string `json:",omitempty"` - - // Namespace is only accepted within service-defaults.upstreamConfig.overrides . - Namespace string `json:",omitempty"` - - // Peer is only accepted within service-defaults.upstreamConfig.overrides . - Peer string `json:",omitempty"` - - // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's - // listener. - // - // Note: This escape hatch is NOT compatible with the discovery chain and - // will be ignored if a discovery chain is active. - EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"` - - // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's - // cluster. The Connect client TLS certificate and context will be injected - // overriding any TLS settings present. - // - // Note: This escape hatch is NOT compatible with the discovery chain and - // will be ignored if a discovery chain is active. - EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"` - - // Protocol describes the upstream's service protocol. Valid values are "tcp", - // "http" and "grpc". Anything else is treated as tcp. The enables protocol - // aware features like per-request metrics and connection pooling, tracing, - // routing etc. - Protocol string `json:",omitempty"` - - // ConnectTimeoutMs is the number of milliseconds to timeout making a new - // connection to this upstream. Defaults to 5000 (5 seconds) if not set. - ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"` - - // Limits are the set of limits that are applied to the proxy for a specific upstream of a - // service instance. 
- Limits *UpstreamLimits `json:",omitempty"` - - // PassiveHealthCheck configuration determines how upstream proxy instances will - // be monitored for removal from the load balancing pool. - PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"` - - // MeshGatewayConfig controls how Mesh Gateways are configured and used - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" ` - - // BalanceOutboundConnections indicates that the proxy should attempt to evenly distribute - // outbound connections across worker threads. Only used by envoy proxies. - BalanceOutboundConnections string `json:",omitempty" alias:"balance_outbound_connections"` -} - -// DestinationConfig represents a virtual service, i.e. one that is external to Consul -type DestinationConfig struct { - // Addresses of the endpoint; hostname or IP - Addresses []string `json:",omitempty"` - - // Port allowed within this endpoint - Port int `json:",omitempty"` -} - -type PassiveHealthCheck struct { - // Interval between health check analysis sweeps. Each sweep may remove - // hosts or return hosts to the pool. - Interval time.Duration `json:",omitempty"` - - // MaxFailures is the count of consecutive failures that results in a host - // being removed from the pool. - MaxFailures uint32 `alias:"max_failures"` - - // EnforcingConsecutive5xx is the % chance that a host will be actually ejected - // when an outlier status is detected through consecutive 5xx. - // This setting can be used to disable ejection or to ramp it up slowly. - EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` - - // The maximum % of an upstream cluster that can be ejected due to outlier detection. - // Defaults to 10% but will eject at least one host regardless of the value. - MaxEjectionPercent *uint32 `json:",omitempty" alias:"max_ejection_percent"` - - // The base time that a host is ejected for. 
The real time is equal to the base time - // multiplied by the number of times the host has been ejected and is capped by - // max_ejection_time (Default 300s). Defaults to 30000ms or 30s. - BaseEjectionTime *time.Duration `json:",omitempty" alias:"base_ejection_time"` -} - -// UpstreamLimits describes the limits that are associated with a specific -// upstream of a service instance. -type UpstreamLimits struct { - // MaxConnections is the maximum number of connections the local proxy can - // make to the upstream service. - MaxConnections *int `alias:"max_connections"` - - // MaxPendingRequests is the maximum number of requests that will be queued - // waiting for an available connection. This is mostly applicable to HTTP/1.1 - // clusters since all HTTP/2 requests are streamed over a single - // connection. - MaxPendingRequests *int `alias:"max_pending_requests"` - - // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed - // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 - // clusters since all HTTP/1.1 requests are limited by MaxConnections. 
- MaxConcurrentRequests *int `alias:"max_concurrent_requests"` -} - -type ServiceConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Protocol string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` - MutualTLSMode MutualTLSMode `json:",omitempty" alias:"mutual_tls_mode"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - ExternalSNI string `json:",omitempty" alias:"external_sni"` - UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` - Destination *DestinationConfig `json:",omitempty"` - MaxInboundConnections int `json:",omitempty" alias:"max_inbound_connections"` - LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` - LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` - BalanceInboundConnections string `json:",omitempty" alias:"balance_inbound_connections"` - EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 -} - -func (s *ServiceConfigEntry) GetKind() string { return s.Kind } -func (s *ServiceConfigEntry) GetName() string { return s.Name } -func (s *ServiceConfigEntry) GetPartition() string { return s.Partition } -func (s *ServiceConfigEntry) GetNamespace() string { return s.Namespace } -func (s *ServiceConfigEntry) GetMeta() map[string]string { return s.Meta } -func (s *ServiceConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex } -func (s *ServiceConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex } - -type ProxyConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig 
`json:",omitempty" alias:"transparent_proxy"` - MutualTLSMode MutualTLSMode `json:",omitempty" alias:"mutual_tls_mode"` - Config map[string]interface{} `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - AccessLogs *AccessLogsConfig `json:",omitempty" alias:"access_logs"` - EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` - FailoverPolicy *ServiceResolverFailoverPolicy `json:",omitempty" alias:"failover_policy"` - PrioritizeByLocality *ServiceResolverPrioritizeByLocality `json:",omitempty" alias:"prioritize_by_locality"` - - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 -} - -func (p *ProxyConfigEntry) GetKind() string { return p.Kind } -func (p *ProxyConfigEntry) GetName() string { return ProxyConfigGlobal } -func (p *ProxyConfigEntry) GetPartition() string { return p.Partition } -func (p *ProxyConfigEntry) GetNamespace() string { return p.Namespace } -func (p *ProxyConfigEntry) GetMeta() map[string]string { return p.Meta } -func (p *ProxyConfigEntry) GetCreateIndex() uint64 { return p.CreateIndex } -func (p *ProxyConfigEntry) GetModifyIndex() uint64 { return p.ModifyIndex } - -func makeConfigEntry(kind, name string) (ConfigEntry, error) { - switch kind { - case ServiceDefaults: - return &ServiceConfigEntry{Kind: kind, Name: name}, nil - case ProxyDefaults: - return &ProxyConfigEntry{Kind: kind, Name: name}, nil - case ServiceRouter: - return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil - case ServiceSplitter: - return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil - case ServiceResolver: - return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil - case IngressGateway: - return &IngressGatewayConfigEntry{Kind: kind, Name: name}, nil - case TerminatingGateway: - return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil - case ServiceIntentions: - return 
&ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil - case MeshConfig: - return &MeshConfigEntry{}, nil - case ExportedServices: - return &ExportedServicesConfigEntry{Name: name}, nil - case SamenessGroup: - return &SamenessGroupConfigEntry{Kind: kind, Name: name}, nil - case APIGateway: - return &APIGatewayConfigEntry{Kind: kind, Name: name}, nil - case TCPRoute: - return &TCPRouteConfigEntry{Kind: kind, Name: name}, nil - case InlineCertificate: - return &InlineCertificateConfigEntry{Kind: kind, Name: name}, nil - case HTTPRoute: - return &HTTPRouteConfigEntry{Kind: kind, Name: name}, nil - case RateLimitIPConfig: - return &RateLimitIPConfigEntry{Kind: kind, Name: name}, nil - case JWTProvider: - return &JWTProviderConfigEntry{Kind: kind, Name: name}, nil - default: - return nil, fmt.Errorf("invalid config entry kind: %s", kind) - } -} - -func MakeConfigEntry(kind, name string) (ConfigEntry, error) { - return makeConfigEntry(kind, name) -} - -// DecodeConfigEntry will decode the result of using json.Unmarshal of a config -// entry into a map[string]interface{}. -// -// Important caveats: -// -// - This will NOT work if the map[string]interface{} was produced using HCL -// decoding as that requires more extensive parsing to work around the issues -// with map[string][]interface{} that arise. -// -// - This will only decode fields using their camel case json field -// representations. 
-func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { - var entry ConfigEntry - - kindVal, ok := raw["Kind"] - if !ok { - kindVal, ok = raw["kind"] - } - if !ok { - return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") - } - - if kindStr, ok := kindVal.(string); ok { - newEntry, err := makeConfigEntry(kindStr, "") - if err != nil { - return nil, err - } - entry = newEntry - } else { - return nil, fmt.Errorf("Kind value in payload is not a string") - } - - decodeConf := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToTimeHookFunc(time.RFC3339), - ), - Result: &entry, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decodeConf) - if err != nil { - return nil, err - } - - return entry, decoder.Decode(raw) -} - -func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) { - var raw map[string]interface{} - if err := json.Unmarshal(data, &raw); err != nil { - return nil, err - } - - return DecodeConfigEntry(raw) -} - -func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) { - var entries []ConfigEntry - for _, rawEntry := range raw { - entry, err := DecodeConfigEntry(rawEntry) - if err != nil { - return nil, err - } - entries = append(entries, entry) - } - return entries, nil -} - -// ConfigEntries can be used to query the Config endpoints -type ConfigEntries struct { - c *Client -} - -// Config returns a handle to the Config endpoints -func (c *Client) ConfigEntries() *ConfigEntries { - return &ConfigEntries{c} -} - -func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) { - if kind == "" || name == "" { - return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty") - } - - entry, err := makeConfigEntry(kind, name) - if err != nil { - return nil, nil, err - } - - r := conf.c.newRequest("GET", 
fmt.Sprintf("/v1/config/%s/%s", kind, name)) - r.setQueryOptions(q) - rtt, resp, err := conf.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, entry); err != nil { - return nil, nil, err - } - - return entry, qm, nil -} - -func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) { - if kind == "" { - return nil, nil, fmt.Errorf("The kind parameter must not be empty") - } - - r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind)) - r.setQueryOptions(q) - rtt, resp, err := conf.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var raw []map[string]interface{} - if err := decodeBody(resp, &raw); err != nil { - return nil, nil, err - } - - entries, err := decodeConfigEntrySlice(raw) - if err != nil { - return nil, nil, err - } - - return entries, qm, nil -} - -func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) { - return conf.set(entry, nil, w) -} - -func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { - return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) -} - -func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { - r := conf.c.newRequest("PUT", "/v1/config") - r.setWriteOptions(w) - for param, value := range params { - r.params.Set(param, value) - } - r.obj = entry - rtt, resp, err := conf.c.doRequest(r) - if err != nil { - return false, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - 
return false, nil, err - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - - wm := &WriteMeta{RequestTime: rtt} - return res, wm, nil -} - -func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) { - _, wm, err := conf.delete(kind, name, nil, w) - return wm, err -} - -// DeleteCAS performs a Check-And-Set deletion of the given config entry, and -// returns true if it was successful. If the provided index no longer matches -// the entry's ModifyIndex (i.e. it was modified by another process) then the -// operation will fail and return false. -func (conf *ConfigEntries) DeleteCAS(kind, name string, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { - return conf.delete(kind, name, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) -} - -func (conf *ConfigEntries) delete(kind, name string, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { - if kind == "" || name == "" { - return false, nil, fmt.Errorf("Both kind and name parameters must not be empty") - } - - r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name)) - r.setWriteOptions(w) - for param, value := range params { - r.params.Set(param, value) - } - - rtt, resp, err := conf.c.doRequest(r) - if err != nil { - return false, nil, err - } - defer closeResponseBody(resp) - - if err := requireOK(resp); err != nil { - return false, nil, err - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - - res := strings.Contains(buf.String(), "true") - wm := &WriteMeta{RequestTime: rtt} - return res, wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go deleted file 
mode 100644 index 3696f7be554..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" - "time" -) - -type ServiceRouterConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - - Routes []ServiceRoute `json:",omitempty"` - - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 -} - -func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceRouterConfigEntry) GetName() string { return e.Name } -func (e *ServiceRouterConfigEntry) GetPartition() string { return e.Partition } -func (e *ServiceRouterConfigEntry) GetNamespace() string { return e.Namespace } -func (e *ServiceRouterConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } - -type ServiceRoute struct { - Match *ServiceRouteMatch `json:",omitempty"` - Destination *ServiceRouteDestination `json:",omitempty"` -} - -type ServiceRouteMatch struct { - HTTP *ServiceRouteHTTPMatch `json:",omitempty"` -} - -type ServiceRouteHTTPMatch struct { - PathExact string `json:",omitempty" alias:"path_exact"` - PathPrefix string `json:",omitempty" alias:"path_prefix"` - PathRegex string `json:",omitempty" alias:"path_regex"` - - Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` - QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty" alias:"query_param"` - Methods []string `json:",omitempty"` -} - -type ServiceRouteHTTPMatchHeader struct { - Name string - Present bool `json:",omitempty"` - Exact string `json:",omitempty"` - Prefix string `json:",omitempty"` - Suffix string `json:",omitempty"` - Regex string `json:",omitempty"` - Invert 
bool `json:",omitempty"` -} - -type ServiceRouteHTTPMatchQueryParam struct { - Name string - Present bool `json:",omitempty"` - Exact string `json:",omitempty"` - Regex string `json:",omitempty"` -} - -type ServiceRouteDestination struct { - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` - PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"` - RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` - IdleTimeout time.Duration `json:",omitempty" alias:"idle_timeout"` - NumRetries uint32 `json:",omitempty" alias:"num_retries"` - RetryOnConnectFailure bool `json:",omitempty" alias:"retry_on_connect_failure"` - RetryOnStatusCodes []uint32 `json:",omitempty" alias:"retry_on_status_codes"` - RetryOn []string `json:",omitempty" alias:"retry_on"` - RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` - ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` -} - -func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) { - type Alias ServiceRouteDestination - exported := &struct { - RequestTimeout string `json:",omitempty"` - IdleTimeout string `json:",omitempty"` - *Alias - }{ - RequestTimeout: e.RequestTimeout.String(), - IdleTimeout: e.IdleTimeout.String(), - Alias: (*Alias)(e), - } - if e.RequestTimeout == 0 { - exported.RequestTimeout = "" - } - if e.IdleTimeout == 0 { - exported.IdleTimeout = "" - } - - return json.Marshal(exported) -} - -func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error { - type Alias ServiceRouteDestination - aux := &struct { - RequestTimeout string - IdleTimeout string - *Alias - }{ - Alias: (*Alias)(e), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.RequestTimeout != "" { - if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil { - return 
err - } - } - if aux.IdleTimeout != "" { - if e.IdleTimeout, err = time.ParseDuration(aux.IdleTimeout); err != nil { - return err - } - } - return nil -} - -type ServiceSplitterConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - - Splits []ServiceSplit `json:",omitempty"` - - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 -} - -func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name } -func (e *ServiceSplitterConfigEntry) GetPartition() string { return e.Partition } -func (e *ServiceSplitterConfigEntry) GetNamespace() string { return e.Namespace } -func (e *ServiceSplitterConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } - -type ServiceSplit struct { - Weight float32 - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` - RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` - ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` -} - -type ServiceResolverConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - - DefaultSubset string `json:",omitempty" alias:"default_subset"` - Subsets map[string]ServiceResolverSubset `json:",omitempty"` - Redirect *ServiceResolverRedirect `json:",omitempty"` - Failover map[string]ServiceResolverFailover `json:",omitempty"` - ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` - RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` - - // PrioritizeByLocality 
controls whether the locality of services within the - // local partition will be used to prioritize connectivity. - PrioritizeByLocality *ServiceResolverPrioritizeByLocality `json:",omitempty" alias:"prioritize_by_locality"` - - // LoadBalancer determines the load balancing policy and configuration for services - // issuing requests to this upstream service. - LoadBalancer *LoadBalancer `json:",omitempty" alias:"load_balancer"` - - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 -} - -func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) { - type Alias ServiceResolverConfigEntry - exported := &struct { - ConnectTimeout string `json:",omitempty"` - *Alias - }{ - ConnectTimeout: e.ConnectTimeout.String(), - Alias: (*Alias)(e), - } - if e.ConnectTimeout == 0 { - exported.ConnectTimeout = "" - } - - return json.Marshal(exported) -} - -func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error { - type Alias ServiceResolverConfigEntry - aux := &struct { - ConnectTimeout string - *Alias - }{ - Alias: (*Alias)(e), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.ConnectTimeout != "" { - if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { - return err - } - } - return nil -} - -func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceResolverConfigEntry) GetName() string { return e.Name } -func (e *ServiceResolverConfigEntry) GetPartition() string { return e.Partition } -func (e *ServiceResolverConfigEntry) GetNamespace() string { return e.Namespace } -func (e *ServiceResolverConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } - -type ServiceResolverSubset struct { - Filter string `json:",omitempty"` - OnlyPassing bool 
`json:",omitempty" alias:"only_passing"` -} - -type ServiceResolverRedirect struct { - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` - Datacenter string `json:",omitempty"` - Peer string `json:",omitempty"` - SamenessGroup string `json:",omitempty" alias:"sameness_group"` -} - -type ServiceResolverFailover struct { - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. - Namespace string `json:",omitempty"` - Datacenters []string `json:",omitempty"` - Targets []ServiceResolverFailoverTarget `json:",omitempty"` - Policy *ServiceResolverFailoverPolicy `json:",omitempty"` - SamenessGroup string `json:",omitempty" alias:"sameness_group"` -} - -type ServiceResolverFailoverTarget struct { - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Datacenter string `json:",omitempty"` - Peer string `json:",omitempty"` -} - -type ServiceResolverFailoverPolicy struct { - // Mode specifies the type of failover that will be performed. Valid values are - // "sequential", "" (equivalent to "sequential") and "order-by-locality". - Mode string `json:",omitempty"` - Regions []string `json:",omitempty"` -} - -type ServiceResolverPrioritizeByLocality struct { - // Mode specifies the type of prioritization that will be performed - // when selecting nodes in the local partition. - // Valid values are: "" (default "none"), "none", and "failover". - Mode string `json:",omitempty"` -} - -// LoadBalancer determines the load balancing policy and configuration for services -// issuing requests to this upstream service. 
-type LoadBalancer struct { - // Policy is the load balancing policy used to select a host - Policy string `json:",omitempty"` - - // RingHashConfig contains configuration for the "ring_hash" policy type - RingHashConfig *RingHashConfig `json:",omitempty" alias:"ring_hash_config"` - - // LeastRequestConfig contains configuration for the "least_request" policy type - LeastRequestConfig *LeastRequestConfig `json:",omitempty" alias:"least_request_config"` - - // HashPolicies is a list of hash policies to use for hashing load balancing algorithms. - // Hash policies are evaluated individually and combined such that identical lists - // result in the same hash. - // If no hash policies are present, or none are successfully evaluated, - // then a random backend host will be selected. - HashPolicies []HashPolicy `json:",omitempty" alias:"hash_policies"` -} - -// RingHashConfig contains configuration for the "ring_hash" policy type -type RingHashConfig struct { - // MinimumRingSize determines the minimum number of entries in the hash ring - MinimumRingSize uint64 `json:",omitempty" alias:"minimum_ring_size"` - - // MaximumRingSize determines the maximum number of entries in the hash ring - MaximumRingSize uint64 `json:",omitempty" alias:"maximum_ring_size"` -} - -// LeastRequestConfig contains configuration for the "least_request" policy type -type LeastRequestConfig struct { - // ChoiceCount determines the number of random healthy hosts from which to select the one with the least requests. - ChoiceCount uint32 `json:",omitempty" alias:"choice_count"` -} - -// HashPolicy defines which attributes will be hashed by hash-based LB algorithms -type HashPolicy struct { - // Field is the attribute type to hash on. - // Must be one of "header","cookie", or "query_parameter". - // Cannot be specified along with SourceIP. - Field string `json:",omitempty"` - - // FieldValue is the value to hash. - // ie. 
header name, cookie name, URL query parameter name - // Cannot be specified along with SourceIP. - FieldValue string `json:",omitempty" alias:"field_value"` - - // CookieConfig contains configuration for the "cookie" hash policy type. - CookieConfig *CookieConfig `json:",omitempty" alias:"cookie_config"` - - // SourceIP determines whether the hash should be of the source IP rather than of a field and field value. - // Cannot be specified along with Field or FieldValue. - SourceIP bool `json:",omitempty" alias:"source_ip"` - - // Terminal will short circuit the computation of the hash when multiple hash policies are present. - // If a hash is computed when a Terminal policy is evaluated, - // then that hash will be used and subsequent hash policies will be ignored. - Terminal bool `json:",omitempty"` -} - -// CookieConfig contains configuration for the "cookie" hash policy type. -// This is specified to have Envoy generate a cookie for a client on its first request. -type CookieConfig struct { - // Generates a session cookie with no expiration. - Session bool `json:",omitempty"` - - // TTL for generated cookies. Cannot be specified for session cookies. - TTL time.Duration `json:",omitempty"` - - // The path to set for the cookie - Path string `json:",omitempty"` -} - -// HTTPHeaderModifiers is a set of rules for HTTP header modification that -// should be performed by proxies as the request passes through them. It can -// operate on either request or response headers depending on the context in -// which it is used. -type HTTPHeaderModifiers struct { - // Add is a set of name -> value pairs that should be appended to the request - // or response (i.e. allowing duplicates if the same header already exists). - Add map[string]string `json:",omitempty"` - - // Set is a set of name -> value pairs that should be added to the request or - // response, overwriting any existing header values of the same name. 
- Set map[string]string `json:",omitempty"` - - // Remove is the set of header names that should be stripped from the request - // or response. - Remove []string `json:",omitempty"` -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go deleted file mode 100644 index 97920e40ddc..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import "encoding/json" - -// ExportedServicesConfigEntry manages the exported services for a single admin partition. -// Admin Partitions are a Consul Enterprise feature. -type ExportedServicesConfigEntry struct { - // Name is the name of the partition the ExportedServicesConfigEntry applies to. - // Partitioning is a Consul Enterprise feature. - Name string `json:",omitempty"` - - // Partition is the partition where the ExportedServicesConfigEntry is stored. - // If the partition does not match the name, the name will overwrite the partition. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Services is a list of services to be exported and the list of partitions - // to expose them to. - Services []ExportedService `json:",omitempty"` - - Meta map[string]string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 -} - -// ExportedService manages the exporting of a service in the local partition to -// other partitions. -type ExportedService struct { - // Name is the name of the service to be exported. - Name string - - // Namespace is the namespace to export the service from. 
- Namespace string `json:",omitempty"` - - // Consumers is a list of downstream consumers of the service to be exported. - Consumers []ServiceConsumer `json:",omitempty"` -} - -// ServiceConsumer represents a downstream consumer of the service to be exported. -// At most one of Partition or Peer must be specified. -type ServiceConsumer struct { - // Partition is the admin partition to export the service to. - Partition string `json:",omitempty"` - - // Peer is the name of the peer to export the service to. - Peer string `json:",omitempty" alias:"peer_name"` - - // SamenessGroup is the name of the sameness group to export the service to. - SamenessGroup string `json:",omitempty" alias:"sameness_group"` -} - -func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } -func (e *ExportedServicesConfigEntry) GetName() string { return e.Name } -func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name } -func (e *ExportedServicesConfigEntry) GetNamespace() string { return "" } -func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } - -// MarshalJSON adds the Kind field so that the JSON can be decoded back into the -// correct type. -func (e *ExportedServicesConfigEntry) MarshalJSON() ([]byte, error) { - type Alias ExportedServicesConfigEntry - source := &struct { - Kind string - *Alias - }{ - Kind: ExportedServices, - Alias: (*Alias)(e), - } - return json.Marshal(source) -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go deleted file mode 100644 index b59f1c0621f..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -// IngressGatewayConfigEntry manages the configuration for an ingress service -// with the given name. -type IngressGatewayConfigEntry struct { - // Kind of the config entry. This should be set to api.IngressGateway. - Kind string - - // Name is used to match the config entry with its associated ingress gateway - // service. This should match the name provided in the service definition. - Name string - - // Partition is the partition the IngressGateway is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the IngressGateway is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // TLS holds the TLS configuration for this gateway. - TLS GatewayTLSConfig - - // Listeners declares what ports the ingress gateway should listen on, and - // what services to associated to those ports. - Listeners []IngressListener - - Meta map[string]string `json:",omitempty"` - - // Defaults is default configuration for all upstream services - Defaults *IngressServiceConfig `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 -} - -type IngressServiceConfig struct { - MaxConnections *uint32 - MaxPendingRequests *uint32 - MaxConcurrentRequests *uint32 - - // PassiveHealthCheck configuration determines how upstream proxy instances will - // be monitored for removal from the load balancing pool. - PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"` -} - -type GatewayTLSConfig struct { - // Indicates that TLS should be enabled for this gateway service. 
- Enabled bool - - // SDS allows configuring TLS certificate from an SDS service. - SDS *GatewayTLSSDSConfig `json:",omitempty"` - - TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` - TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` - - // Define a subset of cipher suites to restrict - // Only applicable to connections negotiated via TLS 1.2 or earlier - CipherSuites []string `json:",omitempty" alias:"cipher_suites"` -} - -type GatewayServiceTLSConfig struct { - // SDS allows configuring TLS certificate from an SDS service. - SDS *GatewayTLSSDSConfig `json:",omitempty"` -} - -type GatewayTLSSDSConfig struct { - ClusterName string `json:",omitempty" alias:"cluster_name"` - CertResource string `json:",omitempty" alias:"cert_resource"` -} - -// IngressListener manages the configuration for a listener on a specific port. -type IngressListener struct { - // Port declares the port on which the ingress gateway should listen for traffic. - Port int - - // Protocol declares what type of traffic this listener is expected to - // receive. Depending on the protocol, a listener might support multiplexing - // services over a single port, or additional discovery chain features. The - // current supported values are: (tcp | http | http2 | grpc). - Protocol string - - // Services declares the set of services to which the listener forwards - // traffic. - // - // For "tcp" protocol listeners, only a single service is allowed. - // For "http" listeners, multiple services can be declared. - Services []IngressService - - // TLS allows specifying some TLS configuration per listener. - TLS *GatewayTLSConfig `json:",omitempty"` -} - -// IngressService manages configuration for services that are exposed to -// ingress traffic. -type IngressService struct { - // Name declares the service to which traffic should be forwarded. - // - // This can either be a specific service, or the wildcard specifier, - // "*". 
If the wildcard specifier is provided, the listener must be of "http" - // protocol and means that the listener will forward traffic to all services. - // - // A name can be specified on multiple listeners, and will be exposed on both - // of the listeners. - Name string - - // Hosts is a list of hostnames which should be associated to this service on - // the defined listener. Only allowed on layer 7 protocols, this will be used - // to route traffic to the service by matching the Host header of the HTTP - // request. - // - // If a host is provided for a service that also has a wildcard specifier - // defined, the host will override the wildcard-specifier-provided - // ".*" domain for that listener. - // - // This cannot be specified when using the wildcard specifier, "*", or when - // using a "tcp" listener. - Hosts []string - - // Namespace is the namespace where the service is located. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition where the service is located. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // TLS allows specifying some TLS configuration per listener. - TLS *GatewayServiceTLSConfig `json:",omitempty"` - - // Allow HTTP header manipulation to be configured. - RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` - ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` - - MaxConnections *uint32 `json:",omitempty" alias:"max_connections"` - MaxPendingRequests *uint32 `json:",omitempty" alias:"max_pending_requests"` - MaxConcurrentRequests *uint32 `json:",omitempty" alias:"max_concurrent_requests"` - - // PassiveHealthCheck configuration determines how upstream proxy instances will - // be monitored for removal from the load balancing pool. 
- PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"` -} - -func (i *IngressGatewayConfigEntry) GetKind() string { return i.Kind } -func (i *IngressGatewayConfigEntry) GetName() string { return i.Name } -func (i *IngressGatewayConfigEntry) GetPartition() string { return i.Partition } -func (i *IngressGatewayConfigEntry) GetNamespace() string { return i.Namespace } -func (i *IngressGatewayConfigEntry) GetMeta() map[string]string { return i.Meta } -func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 { return i.CreateIndex } -func (i *IngressGatewayConfigEntry) GetModifyIndex() uint64 { return i.ModifyIndex } - -// TerminatingGatewayConfigEntry manages the configuration for a terminating gateway -// with the given name. -type TerminatingGatewayConfigEntry struct { - // Kind of the config entry. This should be set to api.TerminatingGateway. - Kind string - - // Name is used to match the config entry with its associated terminating gateway - // service. This should match the name provided in the service definition. - Name string - - // Services is a list of service names represented by the terminating gateway. - Services []LinkedService `json:",omitempty"` - - Meta map[string]string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. 
- Namespace string `json:",omitempty"` -} - -// A LinkedService is a service represented by a terminating gateway -type LinkedService struct { - // Referencing other partitions is not supported. - - // Namespace is where the service is registered. - Namespace string `json:",omitempty"` - - // Name is the name of the service, as defined in Consul's catalog. - Name string `json:",omitempty"` - - // CAFile is the optional path to a CA certificate to use for TLS connections - // from the gateway to the linked service. - CAFile string `json:",omitempty" alias:"ca_file"` - - // CertFile is the optional path to a client certificate to use for TLS connections - // from the gateway to the linked service. - CertFile string `json:",omitempty" alias:"cert_file"` - - // KeyFile is the optional path to a private key to use for TLS connections - // from the gateway to the linked service. - KeyFile string `json:",omitempty" alias:"key_file"` - - // SNI is the optional name to specify during the TLS handshake with a linked service. - SNI string `json:",omitempty"` -} - -func (g *TerminatingGatewayConfigEntry) GetKind() string { return g.Kind } -func (g *TerminatingGatewayConfigEntry) GetName() string { return g.Name } -func (g *TerminatingGatewayConfigEntry) GetPartition() string { return g.Partition } -func (g *TerminatingGatewayConfigEntry) GetNamespace() string { return g.Namespace } -func (g *TerminatingGatewayConfigEntry) GetMeta() map[string]string { return g.Meta } -func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 { return g.CreateIndex } -func (g *TerminatingGatewayConfigEntry) GetModifyIndex() uint64 { return g.ModifyIndex } - -// APIGatewayConfigEntry manages the configuration for an API gateway -// with the given name. -type APIGatewayConfigEntry struct { - // Kind of the config entry. This should be set to api.APIGateway. - Kind string - - // Name is used to match the config entry with its associated api gateway - // service. 
This should match the name provided in the service definition. - Name string - - Meta map[string]string `json:",omitempty"` - - // Listeners is the set of listener configuration to which an API Gateway - // might bind. - Listeners []APIGatewayListener - // Status is the asynchronous status which an APIGateway propagates to the user. - Status ConfigEntryStatus - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} - -func (g *APIGatewayConfigEntry) GetKind() string { return g.Kind } -func (g *APIGatewayConfigEntry) GetName() string { return g.Name } -func (g *APIGatewayConfigEntry) GetPartition() string { return g.Partition } -func (g *APIGatewayConfigEntry) GetNamespace() string { return g.Namespace } -func (g *APIGatewayConfigEntry) GetMeta() map[string]string { return g.Meta } -func (g *APIGatewayConfigEntry) GetCreateIndex() uint64 { return g.CreateIndex } -func (g *APIGatewayConfigEntry) GetModifyIndex() uint64 { return g.ModifyIndex } - -// APIGatewayListener represents an individual listener for an APIGateway -type APIGatewayListener struct { - // Name is the name of the listener in a given gateway. This must be - // unique within a gateway. - Name string - // Hostname is the host name that a listener should be bound to, if - // unspecified, the listener accepts requests for all hostnames. - Hostname string - // Port is the port at which this listener should bind. 
- Port int - // Protocol is the protocol that a listener should use, it must - // either be "http" or "tcp" - Protocol string - // TLS is the TLS settings for the listener. - TLS APIGatewayTLSConfiguration -} - -// APIGatewayTLSConfiguration specifies the configuration of a listener’s -// TLS settings. -type APIGatewayTLSConfiguration struct { - // Certificates is a set of references to certificates - // that a gateway listener uses for TLS termination. - Certificates []ResourceReference - // MaxVersion is the maximum TLS version that the listener - // should support. - MaxVersion string `json:",omitempty" alias:"tls_max_version"` - // MinVersion is the minimum TLS version that the listener - // should support. - MinVersion string `json:",omitempty" alias:"tls_min_version"` - // Define a subset of cipher suites to restrict - // Only applicable to connections negotiated via TLS 1.2 or earlier - CipherSuites []string `json:",omitempty" alias:"cipher_suites"` -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go b/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go deleted file mode 100644 index 47a1ead0566..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// InlineCertificateConfigEntry -- TODO stub -type InlineCertificateConfigEntry struct { - // Kind of the config entry. This should be set to api.InlineCertificate. - Kind string - - // Name is used to match the config entry with its associated tcp-route - // service. This should match the name provided in the service definition. - Name string - - // Certificate is the public certificate component of an x509 key pair encoded in raw PEM format. - Certificate string - // PrivateKey is the private key component of an x509 key pair encoded in raw PEM format. 
- PrivateKey string `alias:"private_key"` - - Meta map[string]string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} - -func (a *InlineCertificateConfigEntry) GetKind() string { return InlineCertificate } -func (a *InlineCertificateConfigEntry) GetName() string { return a.Name } -func (a *InlineCertificateConfigEntry) GetPartition() string { return a.Partition } -func (a *InlineCertificateConfigEntry) GetNamespace() string { return a.Namespace } -func (a *InlineCertificateConfigEntry) GetMeta() map[string]string { return a.Meta } -func (a *InlineCertificateConfigEntry) GetCreateIndex() uint64 { return a.CreateIndex } -func (a *InlineCertificateConfigEntry) GetModifyIndex() uint64 { return a.ModifyIndex } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go deleted file mode 100644 index 3f03b0875b9..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import "time" - -type ServiceIntentionsConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - - Sources []*SourceIntention - JWT *IntentionJWTRequirement `json:",omitempty"` - - Meta map[string]string `json:",omitempty"` - - CreateIndex uint64 - ModifyIndex uint64 -} - -type SourceIntention struct { - Name string - Peer string `json:",omitempty"` - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - SamenessGroup string `json:",omitempty" alias:"sameness_group"` - Action IntentionAction `json:",omitempty"` - Permissions []*IntentionPermission `json:",omitempty"` - Precedence int - Type IntentionSourceType - Description string `json:",omitempty"` - - LegacyID string `json:",omitempty" alias:"legacy_id"` - LegacyMeta map[string]string `json:",omitempty" alias:"legacy_meta"` - LegacyCreateTime *time.Time `json:",omitempty" alias:"legacy_create_time"` - LegacyUpdateTime *time.Time `json:",omitempty" alias:"legacy_update_time"` -} - -func (e *ServiceIntentionsConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceIntentionsConfigEntry) GetName() string { return e.Name } -func (e *ServiceIntentionsConfigEntry) GetPartition() string { return e.Partition } -func (e *ServiceIntentionsConfigEntry) GetNamespace() string { return e.Namespace } -func (e *ServiceIntentionsConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *ServiceIntentionsConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceIntentionsConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } - -type IntentionPermission struct { - Action IntentionAction - HTTP *IntentionHTTPPermission `json:",omitempty"` - JWT *IntentionJWTRequirement `json:",omitempty"` -} - -type IntentionHTTPPermission struct { - PathExact string `json:",omitempty" alias:"path_exact"` - PathPrefix string `json:",omitempty" 
alias:"path_prefix"` - PathRegex string `json:",omitempty" alias:"path_regex"` - - Header []IntentionHTTPHeaderPermission `json:",omitempty"` - - Methods []string `json:",omitempty"` -} - -type IntentionHTTPHeaderPermission struct { - Name string - Present bool `json:",omitempty"` - Exact string `json:",omitempty"` - Prefix string `json:",omitempty"` - Suffix string `json:",omitempty"` - Regex string `json:",omitempty"` - Invert bool `json:",omitempty"` -} - -type IntentionJWTRequirement struct { - // Providers is a list of providers to consider when verifying a JWT. - Providers []*IntentionJWTProvider `json:",omitempty"` -} - -type IntentionJWTProvider struct { - // Name is the name of the JWT provider. There MUST be a corresponding - // "jwt-provider" config entry with this name. - Name string `json:",omitempty"` - - // VerifyClaims is a list of additional claims to verify in a JWT's payload. - VerifyClaims []*IntentionJWTClaimVerification `json:",omitempty" alias:"verify_claims"` -} - -type IntentionJWTClaimVerification struct { - // Path is the path to the claim in the token JSON. - Path []string `json:",omitempty"` - - // Value is the expected value at the given path: - // - If the type at the path is a list then we verify - // that this value is contained in the list. - // - // - If the type at the path is a string then we verify - // that this value matches. - Value string `json:",omitempty"` -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go deleted file mode 100644 index 270f0d56415..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "time" -) - -const ( - DiscoveryTypeStrictDNS ClusterDiscoveryType = "STRICT_DNS" - DiscoveryTypeStatic ClusterDiscoveryType = "STATIC" - DiscoveryTypeLogicalDNS ClusterDiscoveryType = "LOGICAL_DNS" - DiscoveryTypeEDS ClusterDiscoveryType = "EDS" - DiscoveryTypeOriginalDST ClusterDiscoveryType = "ORIGINAL_DST" -) - -type JWTProviderConfigEntry struct { - // Kind is the kind of configuration entry and must be "jwt-provider". - Kind string `json:",omitempty"` - - // Name is the name of the provider being configured. - Name string `json:",omitempty"` - - // JSONWebKeySet defines a JSON Web Key Set, its location on disk, or the - // means with which to fetch a key set from a remote server. - JSONWebKeySet *JSONWebKeySet `json:",omitempty" alias:"json_web_key_set"` - - // Issuer is the entity that must have issued the JWT. - // This value must match the "iss" claim of the token. - Issuer string `json:",omitempty"` - - // Audiences is the set of audiences the JWT is allowed to access. - // If specified, all JWTs verified with this provider must address - // at least one of these to be considered valid. - Audiences []string `json:",omitempty"` - - // Locations where the JWT will be present in requests. - // Envoy will check all of these locations to extract a JWT. - // If no locations are specified Envoy will default to: - // 1. Authorization header with Bearer schema: - // "Authorization: Bearer " - // 2. access_token query parameter. - Locations []*JWTLocation `json:",omitempty"` - - // Forwarding defines rules for forwarding verified JWTs to the backend. - Forwarding *JWTForwardingConfig `json:",omitempty"` - - // ClockSkewSeconds specifies the maximum allowable time difference - // from clock skew when validating the "exp" (Expiration) and "nbf" - // (Not Before) claims. - // - // Default value is 30 seconds. 
- ClockSkewSeconds int `json:",omitempty" alias:"clock_skew_seconds"` - - // CacheConfig defines configuration for caching the validation - // result for previously seen JWTs. Caching results can speed up - // verification when individual tokens are expected to be handled - // multiple times. - CacheConfig *JWTCacheConfig `json:",omitempty" alias:"cache_config"` - - Meta map[string]string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 `json:",omitempty"` - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 `json:",omitempty"` - - // Partition is the partition the JWTProviderConfigEntry applies to. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the JWTProviderConfigEntry applies to. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} - -// JWTLocation is a location where the JWT could be present in requests. -// -// Only one of Header, QueryParam, or Cookie can be specified. -type JWTLocation struct { - // Header defines how to extract a JWT from an HTTP request header. - Header *JWTLocationHeader `json:",omitempty"` - - // QueryParam defines how to extract a JWT from an HTTP request - // query parameter. - QueryParam *JWTLocationQueryParam `json:",omitempty" alias:"query_param"` - - // Cookie defines how to extract a JWT from an HTTP request cookie. - Cookie *JWTLocationCookie `json:",omitempty"` -} - -// JWTLocationHeader defines how to extract a JWT from an HTTP -// request header. -type JWTLocationHeader struct { - // Name is the name of the header containing the token. - Name string `json:",omitempty"` - - // ValuePrefix is an optional prefix that precedes the token in the - // header value. 
- // For example, "Bearer " is a standard value prefix for a header named - // "Authorization", but the prefix is not part of the token itself: - // "Authorization: Bearer " - ValuePrefix string `json:",omitempty" alias:"value_prefix"` - - // Forward defines whether the header with the JWT should be - // forwarded after the token has been verified. If false, the - // header will not be forwarded to the backend. - // - // Default value is false. - Forward bool `json:",omitempty"` -} - -// JWTLocationQueryParam defines how to extract a JWT from an HTTP request query parameter. -type JWTLocationQueryParam struct { - // Name is the name of the query param containing the token. - Name string `json:",omitempty"` -} - -// JWTLocationCookie defines how to extract a JWT from an HTTP request cookie. -type JWTLocationCookie struct { - // Name is the name of the cookie containing the token. - Name string `json:",omitempty"` -} - -type JWTForwardingConfig struct { - // HeaderName is a header name to use when forwarding a verified - // JWT to the backend. The verified JWT could have been extracted - // from any location (query param, header, or cookie). - // - // The header value will be base64-URL-encoded, and will not be - // padded unless PadForwardPayloadHeader is true. - HeaderName string `json:",omitempty" alias:"header_name"` - - // PadForwardPayloadHeader determines whether padding should be added - // to the base64 encoded token forwarded with ForwardPayloadHeader. - // - // Default value is false. - PadForwardPayloadHeader bool `json:",omitempty" alias:"pad_forward_payload_header"` -} - -// JSONWebKeySet defines a key set, its location on disk, or the -// means with which to fetch a key set from a remote server. -// -// Exactly one of Local or Remote must be specified. -type JSONWebKeySet struct { - // Local specifies a local source for the key set. - Local *LocalJWKS `json:",omitempty"` - - // Remote specifies how to fetch a key set from a remote server. 
- Remote *RemoteJWKS `json:",omitempty"` -} - -// LocalJWKS specifies a location for a local JWKS. -// -// Only one of String and Filename can be specified. -type LocalJWKS struct { - // JWKS contains a base64 encoded JWKS. - JWKS string `json:",omitempty"` - - // Filename configures a location on disk where the JWKS can be - // found. If specified, the file must be present on the disk of ALL - // proxies with intentions referencing this provider. - Filename string `json:",omitempty"` -} - -// RemoteJWKS specifies how to fetch a JWKS from a remote server. -type RemoteJWKS struct { - // URI is the URI of the server to query for the JWKS. - URI string `json:",omitempty"` - - // RequestTimeoutMs is the number of milliseconds to - // time out when making a request for the JWKS. - RequestTimeoutMs int `json:",omitempty" alias:"request_timeout_ms"` - - // CacheDuration is the duration after which cached keys - // should be expired. - // - // Default value is 5 minutes. - CacheDuration time.Duration `json:",omitempty" alias:"cache_duration"` - - // FetchAsynchronously indicates that the JWKS should be fetched - // when a client request arrives. Client requests will be paused - // until the JWKS is fetched. - // If false, the proxy listener will wait for the JWKS to be - // fetched before being activated. - // - // Default value is false. - FetchAsynchronously bool `json:",omitempty" alias:"fetch_asynchronously"` - - // RetryPolicy defines a retry policy for fetching JWKS. - // - // There is no retry by default. - RetryPolicy *JWKSRetryPolicy `json:",omitempty" alias:"retry_policy"` - - // JWKSCluster defines how the specified Remote JWKS URI is to be fetched. - JWKSCluster *JWKSCluster `json:",omitempty" alias:"jwks_cluster"` -} - -type JWKSCluster struct { - // DiscoveryType refers to the service discovery type to use for resolving the cluster. - // - // This defaults to STRICT_DNS. - // Other options include STATIC, LOGICAL_DNS, EDS or ORIGINAL_DST. 
- DiscoveryType ClusterDiscoveryType `json:",omitempty" alias:"discovery_type"` - - // TLSCertificates refers to the data containing certificate authority certificates to use - // in verifying a presented peer certificate. - // If not specified and a peer certificate is presented it will not be verified. - // - // Must be either CaCertificateProviderInstance or TrustedCA. - TLSCertificates *JWKSTLSCertificate `json:",omitempty" alias:"tls_certificates"` - - // The timeout for new network connections to hosts in the cluster. - // If not set, a default value of 5s will be used. - ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` -} - -type ClusterDiscoveryType string - -// JWKSTLSCertificate refers to the data containing certificate authority certificates to use -// in verifying a presented peer certificate. -// If not specified and a peer certificate is presented it will not be verified. -// -// Must be either CaCertificateProviderInstance or TrustedCA. -type JWKSTLSCertificate struct { - // CaCertificateProviderInstance Certificate provider instance for fetching TLS certificates. - CaCertificateProviderInstance *JWKSTLSCertProviderInstance `json:",omitempty" alias:"ca_certificate_provider_instance"` - - // TrustedCA defines TLS certificate data containing certificate authority certificates - // to use in verifying a presented peer certificate. - // - // Exactly one of Filename, EnvironmentVariable, InlineString or InlineBytes must be specified. - TrustedCA *JWKSTLSCertTrustedCA `json:",omitempty" alias:"trusted_ca"` -} - -// JWKSTLSCertTrustedCA defines TLS certificate data containing certificate authority certificates -// to use in verifying a presented peer certificate. -// -// Exactly one of Filename, EnvironmentVariable, InlineString or InlineBytes must be specified. 
-type JWKSTLSCertTrustedCA struct { - Filename string `json:",omitempty" alias:"filename"` - EnvironmentVariable string `json:",omitempty" alias:"environment_variable"` - InlineString string `json:",omitempty" alias:"inline_string"` - InlineBytes []byte `json:",omitempty" alias:"inline_bytes"` -} - -type JWKSTLSCertProviderInstance struct { - // InstanceName refers to the certificate provider instance name - // - // The default value is "default". - InstanceName string `json:",omitempty" alias:"instance_name"` - - // CertificateName is used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. - // - // The default value is the empty string. - CertificateName string `json:",omitempty" alias:"certificate_name"` -} - -type JWKSRetryPolicy struct { - // NumRetries is the number of times to retry fetching the JWKS. - // The retry strategy uses jittered exponential backoff with - // a base interval of 1s and max of 10s. - // - // Default value is 0. - NumRetries int `json:",omitempty" alias:"num_retries"` - - // Backoff policy - // - // Defaults to Envoy's backoff policy - RetryPolicyBackOff *RetryPolicyBackOff `json:",omitempty" alias:"retry_policy_back_off"` -} - -type RetryPolicyBackOff struct { - // BaseInterval to be used for the next back off computation - // - // The default value from envoy is 1s - BaseInterval time.Duration `json:",omitempty" alias:"base_interval"` - - // MaxInternal to be used to specify the maximum interval between retries. - // Optional but should be greater or equal to BaseInterval. - // - // Defaults to 10 times BaseInterval - MaxInterval time.Duration `json:",omitempty" alias:"max_interval"` -} - -type JWTCacheConfig struct { - // Size specifies the maximum number of JWT verification - // results to cache. - // - // Defaults to 0, meaning that JWT caching is disabled. 
- Size int `json:",omitempty"` -} - -func (e *JWTProviderConfigEntry) GetKind() string { - return JWTProvider -} - -func (e *JWTProviderConfigEntry) GetName() string { return e.Name } -func (e *JWTProviderConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *JWTProviderConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *JWTProviderConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } -func (e *JWTProviderConfigEntry) GetPartition() string { return e.Partition } -func (e *JWTProviderConfigEntry) GetNamespace() string { return e.Namespace } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go deleted file mode 100644 index 1a1ebb8b536..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" -) - -// MeshConfigEntry manages the global configuration for all service mesh -// proxies. -type MeshConfigEntry struct { - // Partition is the partition the MeshConfigEntry applies to. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the MeshConfigEntry applies to. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // TransparentProxy applies configuration specific to proxies - // in transparent mode. - TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` - - // AllowEnablingPermissiveMutualTLS must be true in order to allow setting - // MutualTLSMode=permissive in either service-defaults or proxy-defaults. 
- AllowEnablingPermissiveMutualTLS bool `json:",omitempty" alias:"allow_enabling_permissive_mutual_tls"` - - TLS *MeshTLSConfig `json:",omitempty"` - - HTTP *MeshHTTPConfig `json:",omitempty"` - - Peering *PeeringMeshConfig `json:",omitempty"` - - Meta map[string]string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 -} - -type TransparentProxyMeshConfig struct { - MeshDestinationsOnly bool `alias:"mesh_destinations_only"` -} - -type MeshTLSConfig struct { - Incoming *MeshDirectionalTLSConfig `json:",omitempty"` - Outgoing *MeshDirectionalTLSConfig `json:",omitempty"` -} - -type MeshDirectionalTLSConfig struct { - TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` - TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` - CipherSuites []string `json:",omitempty" alias:"cipher_suites"` -} - -type MeshHTTPConfig struct { - SanitizeXForwardedClientCert bool `alias:"sanitize_x_forwarded_client_cert"` -} - -type PeeringMeshConfig struct { - PeerThroughMeshGateways bool `json:",omitempty" alias:"peer_through_mesh_gateways"` -} - -func (e *MeshConfigEntry) GetKind() string { return MeshConfig } -func (e *MeshConfigEntry) GetName() string { return MeshConfigMesh } -func (e *MeshConfigEntry) GetPartition() string { return e.Partition } -func (e *MeshConfigEntry) GetNamespace() string { return e.Namespace } -func (e *MeshConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *MeshConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *MeshConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } - -// MarshalJSON adds the Kind field so that the JSON can be decoded back into the -// correct type. 
-func (e *MeshConfigEntry) MarshalJSON() ([]byte, error) { - type Alias MeshConfigEntry - source := &struct { - Kind string - *Alias - }{ - Kind: MeshConfig, - Alias: (*Alias)(e), - } - return json.Marshal(source) -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go deleted file mode 100644 index 8df7d4c98e7..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -type ReadWriteRatesConfig struct { - ReadRate float64 - WriteRate float64 -} - -type RateLimitIPConfigEntry struct { - // Kind of the config entry. This will be set to structs.RateLimitIPConfig - Kind string - Name string - Mode string // {permissive, enforcing, disabled} - - Meta map[string]string `json:",omitempty"` - // overall limits - ReadRate float64 - WriteRate float64 - - //limits specific to a type of call - ACL *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryACL OperationCategory = "ACL" - Catalog *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryCatalog OperationCategory = "Catalog" - ConfigEntry *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryConfigEntry OperationCategory = "ConfigEntry" - ConnectCA *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryConnectCA OperationCategory = "ConnectCA" - Coordinate *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryCoordinate OperationCategory = "Coordinate" - DiscoveryChain *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDiscoveryChain OperationCategory = "DiscoveryChain" - ServerDiscovery *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryServerDiscovery OperationCategory = "ServerDiscovery" - Health *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryHealth OperationCategory = "Health" - Intention 
*ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryIntention OperationCategory = "Intention" - KV *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryKV OperationCategory = "KV" - Tenancy *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPartition OperationCategory = "Tenancy" - PreparedQuery *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPreparedQuery OperationCategory = "PreparedQuery" - Session *ReadWriteRatesConfig `json:",omitempty"` // OperationCategorySession OperationCategory = "Session" - Txn *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryTxn OperationCategory = "Txn" - AutoConfig *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryAutoConfig OperationCategory = "AutoConfig" - FederationState *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryFederationState OperationCategory = "FederationState" - Internal *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryInternal OperationCategory = "Internal" - PeerStream *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPeerStream OperationCategory = "PeerStream" - Peering *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPeering OperationCategory = "Peering" - DataPlane *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDataPlane OperationCategory = "DataPlane" - DNS *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDNS OperationCategory = "DNS" - Subscribe *ReadWriteRatesConfig `json:",omitempty"` // OperationCategorySubscribe OperationCategory = "Subscribe" - Resource *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryResource OperationCategory = "Resource" - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. 
- Namespace string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 -} - -func (r *RateLimitIPConfigEntry) GetKind() string { - return RateLimitIPConfig -} -func (r *RateLimitIPConfigEntry) GetName() string { - if r == nil { - return "" - } - return r.Name -} -func (r *RateLimitIPConfigEntry) GetPartition() string { - return r.Partition -} -func (r *RateLimitIPConfigEntry) GetNamespace() string { - return r.Namespace -} -func (r *RateLimitIPConfigEntry) GetMeta() map[string]string { - if r == nil { - return nil - } - return r.Meta -} -func (r *RateLimitIPConfigEntry) GetCreateIndex() uint64 { - return r.CreateIndex -} -func (r *RateLimitIPConfigEntry) GetModifyIndex() uint64 { - return r.ModifyIndex -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go deleted file mode 100644 index cfea394535d..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// TCPRouteConfigEntry -- TODO stub -type TCPRouteConfigEntry struct { - // Kind of the config entry. This should be set to api.TCPRoute. - Kind string - - // Name is used to match the config entry with its associated tcp-route - // service. This should match the name provided in the service definition. - Name string - - // Parents is a list of gateways that this route should be bound to. - Parents []ResourceReference - // Services is a list of TCP-based services that this should route to. - // Currently, this must specify at maximum one service. 
- Services []TCPService - - Meta map[string]string `json:",omitempty"` - - // Status is the asynchronous status which a TCPRoute propagates to the user. - Status ConfigEntryStatus - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} - -func (a *TCPRouteConfigEntry) GetKind() string { return TCPRoute } -func (a *TCPRouteConfigEntry) GetName() string { return a.Name } -func (a *TCPRouteConfigEntry) GetPartition() string { return a.Partition } -func (a *TCPRouteConfigEntry) GetNamespace() string { return a.Namespace } -func (a *TCPRouteConfigEntry) GetMeta() map[string]string { return a.Meta } -func (a *TCPRouteConfigEntry) GetCreateIndex() uint64 { return a.CreateIndex } -func (a *TCPRouteConfigEntry) GetModifyIndex() uint64 { return a.ModifyIndex } - -// TCPService is a service reference for a TCPRoute -type TCPService struct { - Name string - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} - -// HTTPRouteConfigEntry manages the configuration for a HTTP route -// with the given name. -type HTTPRouteConfigEntry struct { - // Kind of the config entry. This should be set to api.HTTPRoute. 
- Kind string - - // Name is used to match the config entry with its associated http-route. - Name string - - // Parents is a list of gateways that this route should be bound to - Parents []ResourceReference - // Rules are a list of HTTP-based routing rules that this route should - // use for constructing a routing table. - Rules []HTTPRouteRule - // Hostnames are the hostnames for which this HTTPRoute should respond to requests. - Hostnames []string - - Meta map[string]string `json:",omitempty"` - - // CreateIndex is the Raft index this entry was created at. This is a - // read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Status is the asynchronous status which an HTTPRoute propagates to the user. - Status ConfigEntryStatus -} - -func (r *HTTPRouteConfigEntry) GetKind() string { return HTTPRoute } -func (r *HTTPRouteConfigEntry) GetName() string { return r.Name } -func (r *HTTPRouteConfigEntry) GetPartition() string { return r.Partition } -func (r *HTTPRouteConfigEntry) GetNamespace() string { return r.Namespace } -func (r *HTTPRouteConfigEntry) GetMeta() map[string]string { return r.Meta } -func (r *HTTPRouteConfigEntry) GetCreateIndex() uint64 { return r.CreateIndex } -func (r *HTTPRouteConfigEntry) GetModifyIndex() uint64 { return r.ModifyIndex } - -// HTTPMatch specifies the criteria that should be -// used in determining whether or not a request should -// be routed to a given set of services. 
-type HTTPMatch struct { - Headers []HTTPHeaderMatch - Method HTTPMatchMethod - Path HTTPPathMatch - Query []HTTPQueryMatch -} - -// HTTPMatchMethod specifies which type of HTTP verb should -// be used for matching a given request. -type HTTPMatchMethod string - -const ( - HTTPMatchMethodAll HTTPMatchMethod = "" - HTTPMatchMethodConnect HTTPMatchMethod = "CONNECT" - HTTPMatchMethodDelete HTTPMatchMethod = "DELETE" - HTTPMatchMethodGet HTTPMatchMethod = "GET" - HTTPMatchMethodHead HTTPMatchMethod = "HEAD" - HTTPMatchMethodOptions HTTPMatchMethod = "OPTIONS" - HTTPMatchMethodPatch HTTPMatchMethod = "PATCH" - HTTPMatchMethodPost HTTPMatchMethod = "POST" - HTTPMatchMethodPut HTTPMatchMethod = "PUT" - HTTPMatchMethodTrace HTTPMatchMethod = "TRACE" -) - -// HTTPHeaderMatchType specifies how header matching criteria -// should be applied to a request. -type HTTPHeaderMatchType string - -const ( - HTTPHeaderMatchExact HTTPHeaderMatchType = "exact" - HTTPHeaderMatchPrefix HTTPHeaderMatchType = "prefix" - HTTPHeaderMatchPresent HTTPHeaderMatchType = "present" - HTTPHeaderMatchRegularExpression HTTPHeaderMatchType = "regex" - HTTPHeaderMatchSuffix HTTPHeaderMatchType = "suffix" -) - -// HTTPHeaderMatch specifies how a match should be done -// on a request's headers. -type HTTPHeaderMatch struct { - Match HTTPHeaderMatchType - Name string - Value string -} - -// HTTPPathMatchType specifies how path matching criteria -// should be applied to a request. -type HTTPPathMatchType string - -const ( - HTTPPathMatchExact HTTPPathMatchType = "exact" - HTTPPathMatchPrefix HTTPPathMatchType = "prefix" - HTTPPathMatchRegularExpression HTTPPathMatchType = "regex" -) - -// HTTPPathMatch specifies how a match should be done -// on a request's path. -type HTTPPathMatch struct { - Match HTTPPathMatchType - Value string -} - -// HTTPQueryMatchType specifies how querys matching criteria -// should be applied to a request. 
-type HTTPQueryMatchType string - -const ( - HTTPQueryMatchExact HTTPQueryMatchType = "exact" - HTTPQueryMatchPresent HTTPQueryMatchType = "present" - HTTPQueryMatchRegularExpression HTTPQueryMatchType = "regex" -) - -// HTTPQueryMatch specifies how a match should be done -// on a request's query parameters. -type HTTPQueryMatch struct { - Match HTTPQueryMatchType - Name string - Value string -} - -// HTTPFilters specifies a list of filters used to modify a request -// before it is routed to an upstream. -type HTTPFilters struct { - Headers []HTTPHeaderFilter - URLRewrite *URLRewrite -} - -// HTTPHeaderFilter specifies how HTTP headers should be modified. -type HTTPHeaderFilter struct { - Add map[string]string - Remove []string - Set map[string]string -} - -type URLRewrite struct { - Path string -} - -// HTTPRouteRule specifies the routing rules used to determine what upstream -// service an HTTP request is routed to. -type HTTPRouteRule struct { - // Filters is a list of HTTP-based filters used to modify a request prior - // to routing it to the upstream service - Filters HTTPFilters - // Matches specified the matching criteria used in the routing table. If a - // request matches the given HTTPMatch configuration, then traffic is routed - // to services specified in the Services field. - Matches []HTTPMatch - // Services is a list of HTTP-based services to route to if the request matches - // the rules specified in the Matches field. - Services []HTTPService -} - -// HTTPService is a service reference for HTTP-based routing rules -type HTTPService struct { - Name string - // Weight is an arbitrary integer used in calculating how much - // traffic should be sent to the given service. - Weight int - // Filters is a list of HTTP-based filters used to modify a request prior - // to routing it to the upstream service - Filters HTTPFilters - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. 
- Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go b/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go deleted file mode 100644 index 1217efe7d2c..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -type SamenessGroupConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - DefaultForFailover bool `json:",omitempty" alias:"default_for_failover"` - IncludeLocal bool `json:",omitempty" alias:"include_local"` - Members []SamenessGroupMember - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 -} - -type SamenessGroupMember struct { - Partition string `json:",omitempty"` - Peer string `json:",omitempty"` -} - -func (s *SamenessGroupConfigEntry) GetKind() string { return s.Kind } -func (s *SamenessGroupConfigEntry) GetName() string { return s.Name } -func (s *SamenessGroupConfigEntry) GetPartition() string { return s.Partition } -func (s *SamenessGroupConfigEntry) GetNamespace() string { return "" } -func (s *SamenessGroupConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex } -func (s *SamenessGroupConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex } -func (s *SamenessGroupConfigEntry) GetMeta() map[string]string { return s.Meta } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_status.go b/vendor/github.com/hashicorp/consul/api/config_entry_status.go deleted file mode 100644 index 2d16ea0fc4e..00000000000 --- a/vendor/github.com/hashicorp/consul/api/config_entry_status.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "fmt" - "time" - - "golang.org/x/exp/slices" -) - -// ResourceReference is a reference to a ConfigEntry -// with an optional reference to a subsection of that ConfigEntry -// that can be specified as SectionName -type ResourceReference struct { - // Kind is the kind of ConfigEntry that this resource refers to. - Kind string - // Name is the identifier for the ConfigEntry this resource refers to. - Name string - // SectionName is a generic subresource identifier that specifies - // a subset of the ConfigEntry to which this reference applies. Usage - // of this field should be up to the controller that leverages it. If - // unused, this should be blank. - SectionName string - - // Partition is the partition the config entry is associated with. - // Partitioning is a Consul Enterprise feature. - Partition string `json:",omitempty"` - - // Namespace is the namespace the config entry is associated with. - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` -} - -// ConfigEntryStatus is used for propagating back asynchronously calculated -// messages from control loops to a user -type ConfigEntryStatus struct { - // Conditions is the set of condition objects associated with - // a ConfigEntry status. - Conditions []Condition -} - -// Condition is used for a single message and state associated -// with an object. For example, a ConfigEntry that references -// multiple other resources may have different statuses with -// respect to each of those resources. 
-type Condition struct { - // Type is a value from a bounded set of types that an object might have - Type string - // Status is a value from a bounded set of statuses that an object might have - Status ConditionStatus - // Reason is a value from a bounded set of reasons for a given status - Reason string - // Message is a message that gives more detailed information about - // why a Condition has a given status and reason - Message string - // Resource is an optional reference to a resource for which this - // condition applies - Resource *ResourceReference - // LastTransitionTime is the time at which this Condition was created - LastTransitionTime *time.Time -} - -type ( - ConditionStatus string -) - -const ( - ConditionStatusTrue ConditionStatus = "True" - ConditionStatusFalse ConditionStatus = "False" - ConditionStatusUnknown ConditionStatus = "Unknown" -) - -// GatewayConditionType is a type of condition associated with a -// Gateway. This type should be used with the GatewayStatus.Conditions -// field. -type GatewayConditionType string - -// GatewayConditionReason defines the set of reasons that explain why a -// particular Gateway condition type has been raised. -type GatewayConditionReason string - -// the following are directly from the k8s spec -const ( - // This condition is true when the controller managing the Gateway is - // syntactically and semantically valid enough to produce some configuration - // in the underlying data plane. This does not indicate whether or not the - // configuration has been propagated to the data plane. - // - // Possible reasons for this condition to be True are: - // - // * "Accepted" - // - // Possible reasons for this condition to be False are: - // - // * InvalidCertificates - // - GatewayConditionAccepted GatewayConditionType = "Accepted" - - // This reason is used with the "Accepted" condition when the condition is - // True. 
- GatewayReasonAccepted GatewayConditionReason = "Accepted" - - // This reason is used with the "Accepted" condition when the gateway has multiple invalid - // certificates and cannot bind to any routes - GatewayReasonInvalidCertificates GatewayConditionReason = "InvalidCertificates" - - // This condition indicates that the gateway was unable to resolve - // conflicting specification requirements for this Listener. If a - // Listener is conflicted, its network port should not be configured - // on any network elements. - // - // Possible reasons for this condition to be true are: - // - // * "RouteConflict" - // - // Possible reasons for this condition to be False are: - // - // * "NoConflict" - // - // Controllers may raise this condition with other reasons, - // but should prefer to use the reasons listed above to improve - // interoperability. - GatewayConditionConflicted GatewayConditionType = "Conflicted" - // This reason is used with the "Conflicted" condition when the condition - // is False. - GatewayReasonNoConflict GatewayConditionReason = "NoConflict" - // This reason is used with the "Conflicted" condition when the route is - // in a conflicted state, such as when a TCPListener attempts to bind to two routes - GatewayReasonRouteConflict GatewayConditionReason = "RouteConflict" - - // This condition indicates whether the controller was able to - // resolve all the object references for the Gateway. When setting this - // condition to False, a ResourceReference to the misconfigured Listener should - // be provided. - // - // Possible reasons for this condition to be true are: - // - // * "ResolvedRefs" - // - // Possible reasons for this condition to be False are: - // - // * "InvalidCertificateRef" - // * "InvalidRouteKinds" - // * "RefNotPermitted" - // - GatewayConditionResolvedRefs GatewayConditionType = "ResolvedRefs" - - // This reason is used with the "ResolvedRefs" condition when the condition - // is true. 
- GatewayReasonResolvedRefs GatewayConditionReason = "ResolvedRefs" - - // This reason is used with the "ResolvedRefs" condition when a - // Listener has a TLS configuration with at least one TLS CertificateRef - // that is invalid or does not exist. - // A CertificateRef is considered invalid when it refers to a nonexistent - // or unsupported resource or kind, or when the data within that resource - // is malformed. - // This reason must be used only when the reference is allowed, either by - // referencing an object in the same namespace as the Gateway, or when - // a cross-namespace reference has been explicitly allowed by a ReferenceGrant. - // If the reference is not allowed, the reason RefNotPermitted must be used - // instead. - GatewayListenerReasonInvalidCertificateRef GatewayConditionReason = "InvalidCertificateRef" -) - -var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[ConditionStatus][]GatewayConditionReason{ - GatewayConditionAccepted: { - ConditionStatusTrue: { - GatewayReasonAccepted, - }, - ConditionStatusFalse: { - GatewayReasonInvalidCertificates, - }, - ConditionStatusUnknown: {}, - }, - GatewayConditionConflicted: { - ConditionStatusTrue: { - GatewayReasonRouteConflict, - }, - ConditionStatusFalse: { - GatewayReasonNoConflict, - }, - ConditionStatusUnknown: {}, - }, - GatewayConditionResolvedRefs: { - ConditionStatusTrue: { - GatewayReasonResolvedRefs, - }, - ConditionStatusFalse: { - GatewayListenerReasonInvalidCertificateRef, - }, - ConditionStatusUnknown: {}, - }, -} - -func ValidateGatewayConditionReason(name GatewayConditionType, status ConditionStatus, reason GatewayConditionReason) error { - if err := checkConditionStatus(status); err != nil { - return err - } - - reasons, ok := validGatewayConditionReasonsMapping[name] - if !ok { - return fmt.Errorf("unrecognized GatewayConditionType %q", name) - } - - reasonsForStatus, ok := reasons[status] - if !ok { - return fmt.Errorf("unrecognized ConditionStatus %q", status) 
- } - - if !slices.Contains(reasonsForStatus, reason) { - return fmt.Errorf("gateway condition reason %q not allowed for gateway condition type %q with status %q", reason, name, status) - } - return nil -} - -// RouteConditionType is a type of condition for a route. -type RouteConditionType string - -// RouteConditionReason is a reason for a route condition. -type RouteConditionReason string - -// The following statuses are taken from the K8's Spec -// With the exception of: "RouteReasonInvalidDiscoveryChain" and "NoUpstreamServicesTargeted" -const ( - // This condition indicates whether the route has been accepted or rejected - // by a Gateway, and why. - // - // Possible reasons for this condition to be true are: - // - // * "Accepted" - // - // Possible reasons for this condition to be False are: - // - // * "InvalidDiscoveryChain" - // * "NoUpstreamServicesTargeted" - // - // - // Controllers may raise this condition with other reasons, - // but should prefer to use the reasons listed above to improve - // interoperability. - RouteConditionAccepted RouteConditionType = "Accepted" - - // This reason is used with the "Accepted" condition when the Route has been - // accepted by the Gateway. 
- RouteReasonAccepted RouteConditionReason = "Accepted" - - // This reason is used with the "Accepted" condition when the route has an - // invalid discovery chain, this includes conditions like the protocol being invalid - // or the discovery chain failing to compile - RouteReasonInvalidDiscoveryChain RouteConditionReason = "InvalidDiscoveryChain" - - // This reason is used with the "Accepted" condition when the route - RouteReasonNoUpstreamServicesTargeted RouteConditionReason = "NoUpstreamServicesTargeted" -) - -// the following statuses are custom to Consul -const ( - // This condition indicates whether the route was able to successfully bind the - // Listener on the gateway - // Possible reasons for this condition to be true are: - // - // * "Bound" - // - // Possible reasons for this condition to be false are: - // - // * "FailedToBind" - // * "GatewayNotFound" - // - RouteConditionBound RouteConditionType = "Bound" - - // This reason is used with the "Bound" condition when the condition - // is true - RouteReasonBound RouteConditionReason = "Bound" - - // This reason is used with the "Bound" condition when the route failed - // to bind to the gateway - RouteReasonFailedToBind RouteConditionReason = "FailedToBind" - - // This reason is used with the "Bound" condition when the route fails - // to find the gateway - RouteReasonGatewayNotFound RouteConditionReason = "GatewayNotFound" -) - -var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStatus][]RouteConditionReason{ - RouteConditionAccepted: { - ConditionStatusTrue: { - RouteReasonAccepted, - }, - ConditionStatusFalse: { - RouteReasonInvalidDiscoveryChain, - RouteReasonNoUpstreamServicesTargeted, - }, - ConditionStatusUnknown: {}, - }, - RouteConditionBound: { - ConditionStatusTrue: { - RouteReasonBound, - }, - ConditionStatusFalse: { - RouteReasonGatewayNotFound, - RouteReasonFailedToBind, - }, - ConditionStatusUnknown: {}, - }, -} - -func ValidateRouteConditionReason(name 
RouteConditionType, status ConditionStatus, reason RouteConditionReason) error { - if err := checkConditionStatus(status); err != nil { - return err - } - - reasons, ok := validRouteConditionReasonsMapping[name] - if !ok { - return fmt.Errorf("unrecognized RouteConditionType %s", name) - } - - reasonsForStatus, ok := reasons[status] - if !ok { - return fmt.Errorf("unrecognized ConditionStatus %s", name) - } - - if !slices.Contains(reasonsForStatus, reason) { - return fmt.Errorf("route condition reason %s not allowed for route condition type %s with status %s", reason, name, status) - } - - return nil -} - -func checkConditionStatus(status ConditionStatus) error { - switch status { - case ConditionStatusTrue, ConditionStatusFalse, ConditionStatusUnknown: - return nil - default: - return fmt.Errorf("unrecognized condition status: %q", status) - } -} diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go deleted file mode 100644 index 77be00034d0..00000000000 --- a/vendor/github.com/hashicorp/consul/api/connect.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// TelemetryCollectorName is the service name for the Consul Telemetry Collector -const TelemetryCollectorName string = "consul-telemetry-collector" - -// Connect can be used to work with endpoints related to Connect, the -// feature for securely connecting services within Consul. -type Connect struct { - c *Client -} - -// Connect returns a handle to the connect-related endpoints -func (c *Client) Connect() *Connect { - return &Connect{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go deleted file mode 100644 index 8a5c9f870e9..00000000000 --- a/vendor/github.com/hashicorp/consul/api/connect_ca.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "fmt" - "time" - - "github.com/mitchellh/mapstructure" -) - -// CAConfig is the structure for the Connect CA configuration. -type CAConfig struct { - // Provider is the CA provider implementation to use. - Provider string - - // Configuration is arbitrary configuration for the provider. This - // should only contain primitive values and containers (such as lists - // and maps). - Config map[string]interface{} - - // State is read-only data that the provider might have persisted for use - // after restart or leadership transition. For example this might include - // UUIDs of resources it has created. Setting this when writing a - // configuration is an error. - State map[string]string - - // ForceWithoutCrossSigning indicates that the CA reconfiguration should go - // ahead even if the current CA is unable to cross sign certificates. This - // risks temporary connection failures during the rollout as new leafs will be - // rejected by proxies that have not yet observed the new root cert but is the - // only option if a CA that doesn't support cross signing needs to be - // reconfigured or mirated away from. - ForceWithoutCrossSigning bool - - CreateIndex uint64 - ModifyIndex uint64 -} - -// CommonCAProviderConfig is the common options available to all CA providers. -type CommonCAProviderConfig struct { - LeafCertTTL time.Duration - RootCertTTL time.Duration - SkipValidate bool - CSRMaxPerSecond float32 - CSRMaxConcurrent int -} - -// ConsulCAProviderConfig is the config for the built-in Consul CA provider. -type ConsulCAProviderConfig struct { - CommonCAProviderConfig `mapstructure:",squash"` - - PrivateKey string - RootCert string - IntermediateCertTTL time.Duration -} - -// ParseConsulCAConfig takes a raw config map and returns a parsed -// ConsulCAProviderConfig. 
-func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { - var config ConsulCAProviderConfig - decodeConf := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.StringToTimeDurationHookFunc(), - Result: &config, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decodeConf) - if err != nil { - return nil, err - } - - if err := decoder.Decode(raw); err != nil { - return nil, fmt.Errorf("error decoding config: %s", err) - } - - return &config, nil -} - -// CARootList is the structure for the results of listing roots. -type CARootList struct { - ActiveRootID string - TrustDomain string - Roots []*CARoot -} - -// CARoot represents a root CA certificate that is trusted. -type CARoot struct { - // ID is a globally unique ID (UUID) representing this CA root. - ID string - - // Name is a human-friendly name for this CA root. This value is - // opaque to Consul and is not used for anything internally. - Name string - - // RootCertPEM is the PEM-encoded public certificate. - RootCertPEM string `json:"RootCert"` - - // Active is true if this is the current active CA. This must only - // be true for exactly one CA. For any method that modifies roots in the - // state store, tests should be written to verify that multiple roots - // cannot be active. - Active bool - - CreateIndex uint64 - ModifyIndex uint64 -} - -// LeafCert is a certificate that has been issued by a Connect CA. -type LeafCert struct { - // SerialNumber is the unique serial number for this certificate. - // This is encoded in standard hex separated by :. - SerialNumber string - - // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private - // key for that cert, respectively. This should not be stored in the - // state store, but is present in the sign API response. - CertPEM string `json:",omitempty"` - PrivateKeyPEM string `json:",omitempty"` - - // Service is the name of the service for which the cert was issued. 
- // ServiceURI is the cert URI value. - Service string - ServiceURI string - - // ValidAfter and ValidBefore are the validity periods for the - // certificate. - ValidAfter time.Time - ValidBefore time.Time - - CreateIndex uint64 - ModifyIndex uint64 -} - -// CARoots queries the list of available roots. -func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/ca/roots") - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CARootList - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// CAGetConfig returns the current CA configuration. -func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/ca/configuration") - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CAConfig - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// CASetConfig sets the current CA configuration. 
-func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) { - r := h.c.newRequest("PUT", "/v1/connect/ca/configuration") - r.setWriteOptions(q) - r.obj = conf - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go deleted file mode 100644 index e91c03e8b71..00000000000 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "fmt" - "io" - "time" -) - -// Intention defines an intention for the Connect Service Graph. This defines -// the allowed or denied behavior of a connection between two services using -// Connect. -type Intention struct { - // ID is the UUID-based ID for the intention, always generated by Consul. - ID string `json:",omitempty"` - - // Description is a human-friendly description of this intention. - // It is opaque to Consul and is only stored and transferred in API - // requests. - Description string `json:",omitempty"` - - // SourceNS, SourceName are the namespace and name, respectively, of - // the source service. Either of these may be the wildcard "*", but only - // the full value can be a wildcard. Partial wildcards are not allowed. - // The source may also be a non-Consul service, as specified by SourceType. - // - // DestinationNS, DestinationName is the same, but for the destination - // service. The same rules apply. The destination is always a Consul - // service. 
- SourceNS, SourceName string - DestinationNS, DestinationName string - - // SourcePartition and DestinationPartition cannot be wildcards "*" and - // are not compatible with legacy intentions. - SourcePartition string `json:",omitempty"` - DestinationPartition string `json:",omitempty"` - - // SourcePeer cannot be a wildcard "*" and is not compatible with legacy - // intentions. Cannot be used with SourcePartition, as both represent the - // same level of tenancy (partition is local to cluster, peer is remote). - SourcePeer string `json:",omitempty"` - - // SourceSamenessGroup cannot be wildcards "*" and - // is not compatible with legacy intentions. - SourceSamenessGroup string `json:",omitempty"` - - // SourceType is the type of the value for the source. - SourceType IntentionSourceType - - // Action is whether this is an allowlist or denylist intention. - Action IntentionAction `json:",omitempty"` - - // Permissions is the list of additional L7 attributes that extend the - // intention definition. - // - // NOTE: This field is not editable unless editing the underlying - // service-intentions config entry directly. - Permissions []*IntentionPermission `json:",omitempty"` - - // DefaultAddr is not used. - // Deprecated: DefaultAddr is not used and may be removed in a future version. - DefaultAddr string `json:",omitempty"` - // DefaultPort is not used. - // Deprecated: DefaultPort is not used and may be removed in a future version. - DefaultPort int `json:",omitempty"` - - // Meta is arbitrary metadata associated with the intention. This is - // opaque to Consul but is served in API responses. - Meta map[string]string `json:",omitempty"` - - // Precedence is the order that the intention will be applied, with - // larger numbers being applied first. This is a read-only field, on - // any intention update it is updated. - Precedence int - - // CreatedAt and UpdatedAt keep track of when this record was created - // or modified. 
- CreatedAt, UpdatedAt time.Time - - // Hash of the contents of the intention - // - // This is needed mainly for replication purposes. When replicating from - // one DC to another keeping the content Hash will allow us to detect - // content changes more efficiently than checking every single field - Hash []byte `json:",omitempty"` - - CreateIndex uint64 - ModifyIndex uint64 -} - -// String returns human-friendly output describing ths intention. -func (i *Intention) String() string { - var detail string - switch n := len(i.Permissions); n { - case 0: - detail = string(i.Action) - case 1: - detail = "1 permission" - default: - detail = fmt.Sprintf("%d permissions", len(i.Permissions)) - } - - return fmt.Sprintf("%s => %s (%s)", - i.SourceString(), - i.DestinationString(), - detail) -} - -// SourceString returns the namespace/name format for the source, or -// just "name" if the namespace is the default namespace. -func (i *Intention) SourceString() string { - return i.partString(i.SourceNS, i.SourceName) -} - -// DestinationString returns the namespace/name format for the source, or -// just "name" if the namespace is the default namespace. -func (i *Intention) DestinationString() string { - return i.partString(i.DestinationNS, i.DestinationName) -} - -func (i *Intention) partString(ns, n string) string { - // For now we omit the default namespace from the output. In the future - // we might want to look at this and show this in a multi-namespace world. - if ns != "" && ns != IntentionDefaultNamespace { - n = ns + "/" + n - } - - return n -} - -// IntentionDefaultNamespace is the default namespace value. -const IntentionDefaultNamespace = "default" - -// IntentionAction is the action that the intention represents. This -// can be "allow" or "deny" to allowlist or denylist intentions. 
-type IntentionAction string - -const ( - IntentionActionAllow IntentionAction = "allow" - IntentionActionDeny IntentionAction = "deny" -) - -// IntentionSourceType is the type of the source within an intention. -type IntentionSourceType string - -const ( - // IntentionSourceConsul is a service within the Consul catalog. - IntentionSourceConsul IntentionSourceType = "consul" -) - -// IntentionMatch are the arguments for the intention match API. -type IntentionMatch struct { - By IntentionMatchType - Names []string -} - -// IntentionMatchType is the target for a match request. For example, -// matching by source will look for all intentions that match the given -// source value. -type IntentionMatchType string - -const ( - IntentionMatchSource IntentionMatchType = "source" - IntentionMatchDestination IntentionMatchType = "destination" -) - -// IntentionCheck are the arguments for the intention check API. For -// more documentation see the IntentionCheck function. -type IntentionCheck struct { - // Source and Destination are the source and destination values to - // check. The destination is always a Consul service, but the source - // may be other values as defined by the SourceType. - Source, Destination string - - // SourceType is the type of the value for the source. - SourceType IntentionSourceType -} - -// Intentions returns the list of intentions. 
-func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions") - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// IntentionGetExact retrieves a single intention by its unique name instead of -// its ID. -func (h *Connect) IntentionGetExact(source, destination string, q *QueryOptions) (*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions/exact") - r.setQueryOptions(q) - r.params.Set("source", source) - r.params.Set("destination", destination) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - return nil, qm, nil - } else if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - return nil, nil, fmt.Errorf( - "Unexpected response %d: %s", resp.StatusCode, buf.String()) - } - - var out Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// IntentionGet retrieves a single intention. 
-// -// Deprecated: use IntentionGetExact instead -func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - return nil, qm, nil - } else if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - return nil, nil, fmt.Errorf( - "Unexpected response %d: %s", resp.StatusCode, buf.String()) - } - - var out Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// IntentionDeleteExact deletes a single intention by its unique name instead of its ID. -func (h *Connect) IntentionDeleteExact(source, destination string, q *WriteOptions) (*WriteMeta, error) { - r := h.c.newRequest("DELETE", "/v1/connect/intentions/exact") - r.setWriteOptions(q) - r.params.Set("source", source) - r.params.Set("destination", destination) - - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - qm := &WriteMeta{} - qm.RequestTime = rtt - - return qm, nil -} - -// IntentionDelete deletes a single intention. -// -// Deprecated: use IntentionDeleteExact instead -func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { - r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) - r.setWriteOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - qm := &WriteMeta{} - qm.RequestTime = rtt - - return qm, nil -} - -// IntentionMatch returns the list of intentions that match a given source -// or destination. 
The returned intentions are ordered by precedence where -// result[0] is the highest precedence (if that matches, then that rule overrides -// all other rules). -// -// Matching can be done for multiple names at the same time. The resulting -// map is keyed by the given names. Casing is preserved. -func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions/match") - r.setQueryOptions(q) - r.params.Set("by", string(args.By)) - for _, name := range args.Names { - r.params.Add("name", name) - } - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]*Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// IntentionCheck returns whether a given source/destination would be allowed -// or not given the current set of intentions and the configuration of Consul. -func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions/check") - r.setQueryOptions(q) - r.params.Set("source", args.Source) - r.params.Set("destination", args.Destination) - if args.SourceType != "" { - r.params.Set("source-type", string(args.SourceType)) - } - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return false, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return false, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out struct{ Allowed bool } - if err := decodeBody(resp, &out); err != nil { - return false, nil, err - } - return out.Allowed, qm, nil -} - -// IntentionUpsert will update an existing intention. 
The Source & Destination parameters -// in the structure must be non-empty. The ID must be empty. -func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/connect/intentions/exact") - r.setWriteOptions(q) - r.params.Set("source", maybePrefixNamespaceAndPartition(ixn.SourcePartition, ixn.SourceNS, ixn.SourceName)) - r.params.Set("destination", maybePrefixNamespaceAndPartition(ixn.DestinationPartition, ixn.DestinationNS, ixn.DestinationName)) - r.obj = ixn - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -func maybePrefixNamespaceAndPartition(part, ns, name string) string { - switch { - case part == "" && ns == "": - return name - case part == "" && ns != "": - return ns + "/" + name - case part != "" && ns == "": - return part + "/" + IntentionDefaultNamespace + "/" + name - default: - return part + "/" + ns + "/" + name - } -} - -// IntentionCreate will create a new intention. The ID in the given -// structure must be empty and a generate ID will be returned on -// success. -// -// Deprecated: use IntentionUpsert instead -func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) { - r := c.c.newRequest("POST", "/v1/connect/intentions") - r.setWriteOptions(q) - r.obj = ixn - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// IntentionUpdate will update an existing intention. The ID in the given -// structure must be non-empty. 
-// -// Deprecated: use IntentionUpsert instead -func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID) - r.setWriteOptions(q) - r.obj = ixn - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go deleted file mode 100644 index b0269adaef6..00000000000 --- a/vendor/github.com/hashicorp/consul/api/coordinate.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "github.com/hashicorp/serf/coordinate" -) - -// CoordinateEntry represents a node and its associated network coordinate. -type CoordinateEntry struct { - Node string - Segment string - Partition string `json:",omitempty"` - Coord *coordinate.Coordinate -} - -// CoordinateDatacenterMap has the coordinates for servers in a given datacenter -// and area. Network coordinates are only compatible within the same area. -type CoordinateDatacenterMap struct { - Datacenter string - AreaID string - Coordinates []CoordinateEntry -} - -// Coordinate can be used to query the coordinate endpoints -type Coordinate struct { - c *Client -} - -// Coordinate returns a handle to the coordinate endpoints -func (c *Client) Coordinate() *Coordinate { - return &Coordinate{c} -} - -// Datacenters is used to return the coordinates of all the servers in the WAN -// pool. 
-func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { - r := c.c.newRequest("GET", "/v1/coordinate/datacenters") - _, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out []*CoordinateDatacenterMap - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to return the coordinates of all the nodes in the LAN pool. -func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/coordinate/nodes") - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CoordinateEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Update inserts or updates the LAN coordinate of a node. -func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/coordinate/update") - r.setWriteOptions(q) - r.obj = coord - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Node is used to return the coordinates of a single node in the LAN pool. 
-func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/coordinate/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CoordinateEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go deleted file mode 100644 index e6b5dc52dac..00000000000 --- a/vendor/github.com/hashicorp/consul/api/debug.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "fmt" - "io" - "strconv" -) - -// Debug can be used to query the /debug/pprof endpoints to gather -// profiling information about the target agent.Debug -// -// The agent must have enable_debug set to true for profiling to be enabled -// and for these endpoints to function. -type Debug struct { - c *Client -} - -// Debug returns a handle that exposes the internal debug endpoints. 
-func (c *Client) Debug() *Debug { - return &Debug{c} -} - -// Heap returns a pprof heap dump -func (d *Debug) Heap() ([]byte, error) { - r := d.c.newRequest("GET", "/debug/pprof/heap") - _, resp, err := d.c.doRequest(r) - if err != nil { - return nil, fmt.Errorf("error making request: %s", err) - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - // We return a raw response because we're just passing through a response - // from the pprof handlers - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error decoding body: %s", err) - } - - return body, nil -} - -// Profile returns a pprof CPU profile for the specified number of seconds -func (d *Debug) Profile(seconds int) ([]byte, error) { - r := d.c.newRequest("GET", "/debug/pprof/profile") - - // Capture a profile for the specified number of seconds - r.params.Set("seconds", strconv.Itoa(seconds)) - - _, resp, err := d.c.doRequest(r) - if err != nil { - return nil, fmt.Errorf("error making request: %s", err) - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - // We return a raw response because we're just passing through a response - // from the pprof handlers - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error decoding body: %s", err) - } - - return body, nil -} - -// PProf returns a pprof profile for the specified number of seconds. The caller -// is responsible for closing the returned io.ReadCloser once all bytes are read. 
-func (d *Debug) PProf(ctx context.Context, name string, seconds int) (io.ReadCloser, error) { - r := d.c.newRequest("GET", "/debug/pprof/"+name) - r.ctx = ctx - - // Capture a profile for the specified number of seconds - r.params.Set("seconds", strconv.Itoa(seconds)) - - _, resp, err := d.c.doRequest(r) - if err != nil { - return nil, fmt.Errorf("error making request: %s", err) - } - if err := requireOK(resp); err != nil { - return nil, err - } - return resp.Body, nil -} - -// Trace returns an execution trace -func (d *Debug) Trace(seconds int) ([]byte, error) { - r := d.c.newRequest("GET", "/debug/pprof/trace") - - // Capture a trace for the specified number of seconds - r.params.Set("seconds", strconv.Itoa(seconds)) - - _, resp, err := d.c.doRequest(r) - if err != nil { - return nil, fmt.Errorf("error making request: %s", err) - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - // We return a raw response because we're just passing through a response - // from the pprof handlers - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error decoding body: %s", err) - } - - return body, nil -} - -// Goroutine returns a pprof goroutine profile -func (d *Debug) Goroutine() ([]byte, error) { - r := d.c.newRequest("GET", "/debug/pprof/goroutine") - - _, resp, err := d.c.doRequest(r) - if err != nil { - return nil, fmt.Errorf("error making request: %s", err) - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - // We return a raw response because we're just passing through a response - // from the pprof handlers - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error decoding body: %s", err) - } - - return body, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go deleted file mode 100644 index 4b6260cf34a..00000000000 --- 
a/vendor/github.com/hashicorp/consul/api/discovery_chain.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" - "fmt" - "time" -) - -// DiscoveryChain can be used to query the discovery-chain endpoints -type DiscoveryChain struct { - c *Client -} - -// DiscoveryChain returns a handle to the discovery-chain endpoints -func (c *Client) DiscoveryChain() *DiscoveryChain { - return &DiscoveryChain{c} -} - -func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) { - if name == "" { - return nil, nil, fmt.Errorf("Name parameter must not be empty") - } - - method := "GET" - if opts != nil && opts.requiresPOST() { - method = "POST" - } - - r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name)) - r.setQueryOptions(q) - - if opts != nil { - if opts.EvaluateInDatacenter != "" { - r.params.Set("compile-dc", opts.EvaluateInDatacenter) - } - } - - if method == "POST" { - r.obj = opts - } - rtt, resp, err := d.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out DiscoveryChainResponse - - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -type DiscoveryChainOptions struct { - EvaluateInDatacenter string `json:"-"` - - // OverrideMeshGateway allows for the mesh gateway setting to be overridden - // for any resolver in the compiled chain. - OverrideMeshGateway MeshGatewayConfig `json:",omitempty"` - - // OverrideProtocol allows for the final protocol for the chain to be - // altered. - // - // - If the chain ordinarily would be TCP and an L7 protocol is passed here - // the chain will not include Routers or Splitters. 
- // - // - If the chain ordinarily would be L7 and TCP is passed here the chain - // will not include Routers or Splitters. - OverrideProtocol string `json:",omitempty"` - - // OverrideConnectTimeout allows for the ConnectTimeout setting to be - // overridden for any resolver in the compiled chain. - OverrideConnectTimeout time.Duration `json:",omitempty"` -} - -func (o *DiscoveryChainOptions) requiresPOST() bool { - if o == nil { - return false - } - return o.OverrideMeshGateway.Mode != "" || - o.OverrideProtocol != "" || - o.OverrideConnectTimeout != 0 -} - -type DiscoveryChainResponse struct { - Chain *CompiledDiscoveryChain -} - -type CompiledDiscoveryChain struct { - ServiceName string - Namespace string - Datacenter string - - // CustomizationHash is a unique hash of any data that affects the - // compilation of the discovery chain other than config entries or the - // name/namespace/datacenter evaluation criteria. - // - // If set, this value should be used to prefix/suffix any generated load - // balancer data plane objects to avoid sharing customized and - // non-customized versions. - CustomizationHash string - - // Default indicates if this discovery chain is based on no - // service-resolver, service-splitter, or service-router config entries. - Default bool - - // Protocol is the overall protocol shared by everything in the chain. - Protocol string - - // ServiceMeta is the metadata from the underlying service-defaults config - // entry for the service named ServiceName. - ServiceMeta map[string]string - - // StartNode is the first key into the Nodes map that should be followed - // when walking the discovery chain. - StartNode string - - // Nodes contains all nodes available for traversal in the chain keyed by a - // unique name. You can walk this by starting with StartNode. - // - // NOTE: The names should be treated as opaque values and are only - // guaranteed to be consistent within a single compilation. 
- Nodes map[string]*DiscoveryGraphNode - - // Targets is a list of all targets used in this chain. - // - // NOTE: The names should be treated as opaque values and are only - // guaranteed to be consistent within a single compilation. - Targets map[string]*DiscoveryTarget -} - -const ( - DiscoveryGraphNodeTypeRouter = "router" - DiscoveryGraphNodeTypeSplitter = "splitter" - DiscoveryGraphNodeTypeResolver = "resolver" -) - -// DiscoveryGraphNode is a single node in the compiled discovery chain. -type DiscoveryGraphNode struct { - Type string - Name string // this is NOT necessarily a service - - // fields for Type==router - Routes []*DiscoveryRoute - - // fields for Type==splitter - Splits []*DiscoverySplit - - // fields for Type==resolver - Resolver *DiscoveryResolver - - // shared by Type==resolver || Type==splitter - LoadBalancer *LoadBalancer `json:",omitempty"` -} - -// compiled form of ServiceRoute -type DiscoveryRoute struct { - Definition *ServiceRoute - NextNode string -} - -// compiled form of ServiceSplit -type DiscoverySplit struct { - Weight float32 - NextNode string -} - -// compiled form of ServiceResolverConfigEntry -type DiscoveryResolver struct { - Default bool - ConnectTimeout time.Duration - Target string - Failover *DiscoveryFailover -} - -func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) { - type Alias DiscoveryResolver - exported := &struct { - ConnectTimeout string `json:",omitempty"` - *Alias - }{ - ConnectTimeout: r.ConnectTimeout.String(), - Alias: (*Alias)(r), - } - if r.ConnectTimeout == 0 { - exported.ConnectTimeout = "" - } - - return json.Marshal(exported) -} - -func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error { - type Alias DiscoveryResolver - aux := &struct { - ConnectTimeout string - *Alias - }{ - Alias: (*Alias)(r), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.ConnectTimeout != "" { - if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != 
nil { - return err - } - } - return nil -} - -// compiled form of ServiceResolverFailover -type DiscoveryFailover struct { - Targets []string - Policy ServiceResolverFailoverPolicy `json:",omitempty"` -} - -// DiscoveryTarget represents all of the inputs necessary to use a resolver -// config entry to execute a catalog query to generate a list of service -// instances during discovery. -type DiscoveryTarget struct { - ID string - - Service string - ServiceSubset string - Namespace string - Datacenter string - - MeshGateway MeshGatewayConfig - Subset ServiceResolverSubset - ConnectTimeout time.Duration - External bool - SNI string - Name string -} - -func (t *DiscoveryTarget) MarshalJSON() ([]byte, error) { - type Alias DiscoveryTarget - exported := &struct { - ConnectTimeout string `json:",omitempty"` - *Alias - }{ - ConnectTimeout: t.ConnectTimeout.String(), - Alias: (*Alias)(t), - } - if t.ConnectTimeout == 0 { - exported.ConnectTimeout = "" - } - - return json.Marshal(exported) -} - -func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error { - type Alias DiscoveryTarget - aux := &struct { - ConnectTimeout string - *Alias - }{ - Alias: (*Alias)(t), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.ConnectTimeout != "" { - if t.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go deleted file mode 100644 index efba89d3b56..00000000000 --- a/vendor/github.com/hashicorp/consul/api/event.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "strconv" -) - -// Event can be used to query the Event endpoints -type Event struct { - c *Client -} - -// UserEvent represents an event that was fired by the user -type UserEvent struct { - ID string - Name string - Payload []byte - NodeFilter string - ServiceFilter string - TagFilter string - Version int - LTime uint64 -} - -// Event returns a handle to the event endpoints -func (c *Client) Event() *Event { - return &Event{c} -} - -// Fire is used to fire a new user event. Only the Name, Payload and Filters -// are respected. This returns the ID or an associated error. Cross DC requests -// are supported. -func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { - r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) - r.setWriteOptions(q) - if params.NodeFilter != "" { - r.params.Set("node", params.NodeFilter) - } - if params.ServiceFilter != "" { - r.params.Set("service", params.ServiceFilter) - } - if params.TagFilter != "" { - r.params.Set("tag", params.TagFilter) - } - if params.Payload != nil { - r.body = bytes.NewReader(params.Payload) - } - r.header.Set("Content-Type", "application/octet-stream") - - rtt, resp, err := e.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out UserEvent - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// List is used to get the most recent events an agent has received. -// This list can be optionally filtered by the name. This endpoint supports -// quasi-blocking queries. The index is not monotonic, nor does it provide provide -// LastContact or KnownLeader. 
-func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { - r := e.c.newRequest("GET", "/v1/event/list") - r.setQueryOptions(q) - if name != "" { - r.params.Set("name", name) - } - rtt, resp, err := e.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*UserEvent - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// IDToIndex is a bit of a hack. This simulates the index generation to -// convert an event ID into a WaitIndex. -func (e *Event) IDToIndex(uuid string) uint64 { - lower := uuid[0:8] + uuid[9:13] + uuid[14:18] - upper := uuid[19:23] + uuid[24:36] - lowVal, err := strconv.ParseUint(lower, 16, 64) - if err != nil { - panic("Failed to convert " + lower) - } - highVal, err := strconv.ParseUint(upper, 16, 64) - if err != nil { - panic("Failed to convert " + upper) - } - return lowVal ^ highVal -} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go deleted file mode 100644 index a0230020460..00000000000 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" - "fmt" - "strings" - "time" -) - -const ( - // HealthAny is special, and is used as a wild card, - // not as a specific state. - HealthAny = "any" - HealthPassing = "passing" - HealthWarning = "warning" - HealthCritical = "critical" - HealthMaint = "maintenance" -) - -const ( - serviceHealth = "service" - connectHealth = "connect" - ingressHealth = "ingress" -) - -const ( - // NodeMaint is the special key set by a node in maintenance mode. 
- NodeMaint = "_node_maintenance" - - // ServiceMaintPrefix is the prefix for a service in maintenance mode. - ServiceMaintPrefix = "_service_maintenance:" -) - -// HealthCheck is used to represent a single check -type HealthCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - ServiceTags []string - Type string - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` - ExposedPort int - PeerName string `json:",omitempty"` - - Definition HealthCheckDefinition - - CreateIndex uint64 - ModifyIndex uint64 -} - -// HealthCheckDefinition is used to store the details about -// a health check's execution. -type HealthCheckDefinition struct { - HTTP string - Header map[string][]string - Method string - Body string - TLSServerName string - TLSSkipVerify bool - TCP string - TCPUseTLS bool - UDP string - GRPC string - OSService string - GRPCUseTLS bool - IntervalDuration time.Duration `json:"-"` - TimeoutDuration time.Duration `json:"-"` - DeregisterCriticalServiceAfterDuration time.Duration `json:"-"` - - // DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead. 
- Interval ReadableDuration - Timeout ReadableDuration - DeregisterCriticalServiceAfter ReadableDuration -} - -func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) { - type Alias HealthCheckDefinition - out := &struct { - Interval string - Timeout string - DeregisterCriticalServiceAfter string - *Alias - }{ - Interval: d.Interval.String(), - Timeout: d.Timeout.String(), - DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(), - Alias: (*Alias)(d), - } - - if d.IntervalDuration != 0 { - out.Interval = d.IntervalDuration.String() - } else if d.Interval != 0 { - out.Interval = d.Interval.String() - } - if d.TimeoutDuration != 0 { - out.Timeout = d.TimeoutDuration.String() - } else if d.Timeout != 0 { - out.Timeout = d.Timeout.String() - } - if d.DeregisterCriticalServiceAfterDuration != 0 { - out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String() - } else if d.DeregisterCriticalServiceAfter != 0 { - out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String() - } - - return json.Marshal(out) -} - -func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) { - type Alias HealthCheckDefinition - aux := &struct { - IntervalDuration interface{} - TimeoutDuration interface{} - DeregisterCriticalServiceAfterDuration interface{} - *Alias - }{ - Alias: (*Alias)(t), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - // Parse the values into both the time.Duration and old ReadableDuration fields. 
- - if aux.IntervalDuration == nil { - t.IntervalDuration = time.Duration(t.Interval) - } else { - switch v := aux.IntervalDuration.(type) { - case string: - if t.IntervalDuration, err = time.ParseDuration(v); err != nil { - return err - } - case float64: - t.IntervalDuration = time.Duration(v) - } - t.Interval = ReadableDuration(t.IntervalDuration) - } - - if aux.TimeoutDuration == nil { - t.TimeoutDuration = time.Duration(t.Timeout) - } else { - switch v := aux.TimeoutDuration.(type) { - case string: - if t.TimeoutDuration, err = time.ParseDuration(v); err != nil { - return err - } - case float64: - t.TimeoutDuration = time.Duration(v) - } - t.Timeout = ReadableDuration(t.TimeoutDuration) - } - if aux.DeregisterCriticalServiceAfterDuration == nil { - t.DeregisterCriticalServiceAfterDuration = time.Duration(t.DeregisterCriticalServiceAfter) - } else { - switch v := aux.DeregisterCriticalServiceAfterDuration.(type) { - case string: - if t.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(v); err != nil { - return err - } - case float64: - t.DeregisterCriticalServiceAfterDuration = time.Duration(v) - } - t.DeregisterCriticalServiceAfter = ReadableDuration(t.DeregisterCriticalServiceAfterDuration) - } - - return nil -} - -// HealthChecks is a collection of HealthCheck structs. -type HealthChecks []*HealthCheck - -// AggregatedStatus returns the "best" status for the list of health checks. 
-// Because a given entry may have many service and node-level health checks -// attached, this function determines the best representative of the status as -// as single string using the following heuristic: -// -// maintenance > critical > warning > passing -func (c HealthChecks) AggregatedStatus() string { - var passing, warning, critical, maintenance bool - for _, check := range c { - id := check.CheckID - if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { - maintenance = true - continue - } - - switch check.Status { - case HealthPassing: - passing = true - case HealthWarning: - warning = true - case HealthCritical: - critical = true - default: - return "" - } - } - - switch { - case maintenance: - return HealthMaint - case critical: - return HealthCritical - case warning: - return HealthWarning - case passing: - return HealthPassing - default: - return HealthPassing - } -} - -// ServiceEntry is used for the health service endpoint -type ServiceEntry struct { - Node *Node - Service *AgentService - Checks HealthChecks -} - -// Health can be used to query the Health endpoints -type Health struct { - c *Client -} - -// Health returns a handle to the health endpoints -func (c *Client) Health() *Health { - return &Health{c} -} - -// Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) 
(HealthChecks, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/checks/"+service) - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query health information along with service info -// for a given service. It can optionally do server-side filtering on a tag -// or nodes with passing health checks only. -func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - var tags []string - if tag != "" { - tags = []string{tag} - } - return h.service(service, tags, passingOnly, q, serviceHealth) -} - -func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tags, passingOnly, q, serviceHealth) -} - -// Connect is equivalent to Service except that it will only return services -// which are Connect-enabled and will returns the connection address for Connect -// client's to use which may be a proxy in front of the named service. If -// passingOnly is true only instances where both the service and any proxy are -// healthy will be returned. 
-func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - var tags []string - if tag != "" { - tags = []string{tag} - } - return h.service(service, tags, passingOnly, q, connectHealth) -} - -func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tags, passingOnly, q, connectHealth) -} - -// Ingress is equivalent to Connect except that it will only return associated -// ingress gateways for the requested service. -func (h *Health) Ingress(service string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - var tags []string - return h.service(service, tags, passingOnly, q, ingressHealth) -} - -func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, healthType string) ([]*ServiceEntry, *QueryMeta, error) { - var path string - switch healthType { - case connectHealth: - path = "/v1/health/connect/" + service - case ingressHealth: - path = "/v1/health/ingress/" + service - default: - path = "/v1/health/service/" + service - } - - r := h.c.newRequest("GET", path) - r.setQueryOptions(q) - if len(tags) > 0 { - for _, tag := range tags { - r.params.Add("tag", tag) - } - } - if passingOnly { - r.params.Set(HealthPassing, "1") - } - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*ServiceEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// State is used to retrieve all the checks in a given state. -// The wildcard "any" state can also be used for all checks. 
-func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - switch state { - case HealthAny: - case HealthWarning: - case HealthCritical: - case HealthPassing: - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/internal.go b/vendor/github.com/hashicorp/consul/api/internal.go deleted file mode 100644 index dee161a65eb..00000000000 --- a/vendor/github.com/hashicorp/consul/api/internal.go +++ /dev/null @@ -1,64 +0,0 @@ -package api - -import "context" - -// Internal can be used to query endpoints that are intended for -// Hashicorp internal-use only. -type Internal struct { - c *Client -} - -// Internal returns a handle to endpoints that are for internal -// Hashicorp usage only. There is not guarantee that these will -// be backwards-compatible or supported, so usage of these is -// not encouraged. 
-func (c *Client) Internal() *Internal { - return &Internal{c} -} - -type AssignServiceManualVIPsRequest struct { - Service string - ManualVIPs []string -} - -type AssignServiceManualVIPsResponse struct { - ServiceFound bool `json:"Found"` - UnassignedFrom []PeeredServiceName -} - -type PeeredServiceName struct { - ServiceName CompoundServiceName - Peer string -} - -func (i *Internal) AssignServiceVirtualIP( - ctx context.Context, - service string, - manualVIPs []string, - wo *WriteOptions, -) (*AssignServiceManualVIPsResponse, *QueryMeta, error) { - req := i.c.newRequest("PUT", "/v1/internal/service-virtual-ip") - req.setWriteOptions(wo) - req.ctx = ctx - req.obj = AssignServiceManualVIPsRequest{ - Service: service, - ManualVIPs: manualVIPs, - } - rtt, resp, err := i.c.doRequest(req) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{RequestTime: rtt} - parseQueryMeta(resp, qm) - - var out AssignServiceManualVIPsResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index b9d330a6fd3..00000000000 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - // Key is the name of the key. It is also part of the URL path when accessed - // via the API. - Key string - - // CreateIndex holds the index corresponding the creation of this KVPair. This - // is a read-only field. 
- CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // LockIndex holds the index corresponding to a lock on this key, if any. This - // is a read-only field. - LockIndex uint64 - - // Flags are any user-defined flags on the key. It is up to the implementer - // to check these values, since Consul does not treat them specially. - Flags uint64 - - // Value is the value for the key. This can be any value, but it will be - // base64 encoded upon transport. - Value []byte - - // Session is a string representing the ID of the session. Any other - // interactions with this key over the same session must specify the same - // session ID. - Session string - - // Namespace is the namespace the KVPair is associated with - // Namespacing is a Consul Enterprise feature. - Namespace string `json:",omitempty"` - - // Partition is the partition the KVPair is associated with - // Admin Partition is a Consul Enterprise feature. - Partition string `json:",omitempty"` -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V apis -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key. The returned pointer -// to the KVPair will be nil if the key does not exist. 
-func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer closeResponseBody(resp) - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer closeResponseBody(resp) - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. 
-func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer closeResponseBody(resp) - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - err = requireHttpCodes(resp, 200, 404) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - closeResponseBody(resp) - return nil, qm, nil - } - - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value is respected. -func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. -func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. 
The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - r.header.Set("Content-Type", "application/octet-stream") - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return false, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return false, nil, err - } - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. 
The Key -// and ModifyIndex are respected. Returns true on success or false on failures. -func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return false, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return false, nil, err - } - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// The Txn function has been deprecated from the KV object; please see the Txn -// object for more information about Transactions. -func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { - var ops TxnOps - for _, op := range txn { - ops = append(ops, &TxnOp{KV: op}) - } - - respOk, txnResp, qm, err := k.c.txn(ops, q) - if err != nil { - return false, nil, nil, err - } - - // Convert from the internal format. 
- kvResp := KVTxnResponse{ - Errors: txnResp.Errors, - } - for _, result := range txnResp.Results { - kvResp.Results = append(kvResp.Results, result.KV) - } - return respOk, &kvResp, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go deleted file mode 100644 index e9529f7bde6..00000000000 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "fmt" - "sync" - "time" -) - -const ( - // DefaultLockSessionName is the Session Name we assign if none is provided - DefaultLockSessionName = "Consul API Lock" - - // DefaultLockSessionTTL is the default session TTL if no Session is provided - // when creating a new Lock. This is used because we do not have another - // other check to depend upon. - DefaultLockSessionTTL = "15s" - - // DefaultLockWaitTime is how long we block for at a time to check if lock - // acquisition is possible. This affects the minimum time it takes to cancel - // a Lock acquisition. - DefaultLockWaitTime = 15 * time.Second - - // DefaultLockRetryTime is how long we wait after a failed lock acquisition - // before attempting to do the lock again. This is so that once a lock-delay - // is in effect, we do not hot loop retrying the acquisition. - DefaultLockRetryTime = 5 * time.Second - - // DefaultMonitorRetryTime is how long we wait after a failed monitor check - // of a lock (500 response code). This allows the monitor to ride out brief - // periods of unavailability, subject to the MonitorRetries setting in the - // lock options which is by default set to 0, disabling this feature. This - // affects locks and semaphores. - DefaultMonitorRetryTime = 2 * time.Second - - // LockFlagValue is a magic flag we set to indicate a key - // is being used for a lock. It is used to detect a potential - // conflict with a semaphore. 
- LockFlagValue = 0x2ddccbc058a50c18 -) - -var ( - // ErrLockHeld is returned if we attempt to double lock - ErrLockHeld = fmt.Errorf("Lock already held") - - // ErrLockNotHeld is returned if we attempt to unlock a lock - // that we do not hold. - ErrLockNotHeld = fmt.Errorf("Lock not held") - - // ErrLockInUse is returned if we attempt to destroy a lock - // that is in use. - ErrLockInUse = fmt.Errorf("Lock in use") - - // ErrLockConflict is returned if the flags on a key - // used for a lock do not match expectation - ErrLockConflict = fmt.Errorf("Existing key does not match lock use") -) - -// Lock is used to implement client-side leader election. It is follows the -// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. -type Lock struct { - c *Client - opts *LockOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// LockOptions is used to parameterize the Lock behavior. -type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionOpts *SessionEntry // Optional, options to use when creating a session - SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) - SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime - LockTryOnce bool // Optional, defaults to false which means try forever - LockDelay time.Duration // Optional, defaults to 15s - Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace -} - -// LockKey returns a handle to a lock struct which can be used 
-// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.LockWaitTime == 0 { - opts.LockWaitTime = DefaultLockWaitTime - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. -// Returns a channel that is closed if our lock is lost or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost. 
-func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - wOpts := WriteOptions{ - Namespace: l.opts.Namespace, - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - s, err := l.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - l.sessionRenew = make(chan struct{}) - l.lockSession = s - - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, &wOpts, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - - // Setup the query options - kv := l.c.KV() - qOpts := QueryOptions{ - WaitTime: l.opts.LockWaitTime, - Namespace: l.opts.Namespace, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if l.opts.LockTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > l.opts.LockWaitTime { - return nil, nil - } - - // Query wait time should not exceed the lock wait time - qOpts.WaitTime = l.opts.LockWaitTime - elapsed - } - attempts++ - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, &qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - - locked, _, err = kv.Acquire(pair, &wOpts) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, &qOpts) - if err != nil { - return nil, err - } - if pair != nil && pair.Session != "" { - //If the session is not null, this means that a wait can safely happen - //using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. 
-func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - w := WriteOptions{Namespace: l.opts.Namespace} - - _, _, err := kv.Release(lockEnt, &w) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to cleanup the lock entry. It is not necessary -// to invoke. It will fail if the lock is in use. -func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - q := QueryOptions{Namespace: l.opts.Namespace} - - pair, _, err := kv.Get(l.opts.Key, &q) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - w := WriteOptions{Namespace: l.opts.Namespace} - didRemove, _, err := kv.DeleteCAS(pair, &w) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := l.opts.SessionOpts - if se == nil { - se = &SessionEntry{ - Name: 
l.opts.SessionName, - TTL: l.opts.SessionTTL, - LockDelay: l.opts.LockDelay, - } - } - w := WriteOptions{Namespace: l.opts.Namespace} - id, _, err := session.Create(se, &w) - if err != nil { - return "", err - } - return id, nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long running routine to monitor a lock ownership -// It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := QueryOptions{ - RequireConsistent: true, - Namespace: l.opts.Namespace, - } -WAIT: - retries := l.opts.MonitorRetries -RETRY: - pair, meta, err := kv.Get(l.opts.Key, &opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(l.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go deleted file mode 100644 index 98afd229989..00000000000 --- a/vendor/github.com/hashicorp/consul/api/namespace.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" - "fmt" - "time" -) - -// Namespace is the configuration of a single namespace. Namespacing is a Consul Enterprise feature. -type Namespace struct { - // Name is the name of the Namespace. It must be unique and - // must be a DNS hostname. 
There are also other reserved names - // that may not be used. - Name string `json:"Name"` - - // Description is where the user puts any information they want - // about the namespace. It is not used internally. - Description string `json:"Description,omitempty"` - - // ACLs is the configuration of ACLs for this namespace. It has its - // own struct so that we can add more to it in the future. - // This is nullable so that we can omit if empty when encoding in JSON - ACLs *NamespaceACLConfig `json:"ACLs,omitempty"` - - // Meta is a map that can be used to add kv metadata to the namespace definition - Meta map[string]string `json:"Meta,omitempty"` - - // DeletedAt is the time when the Namespace was marked for deletion - // This is nullable so that we can omit if empty when encoding in JSON - DeletedAt *time.Time `json:"DeletedAt,omitempty" alias:"deleted_at"` - - // Partition which contains the Namespace. - Partition string `json:"Partition,omitempty"` - - // CreateIndex is the Raft index at which the Namespace was created - CreateIndex uint64 `json:"CreateIndex,omitempty"` - - // ModifyIndex is the latest Raft index at which the Namespace was modified. - ModifyIndex uint64 `json:"ModifyIndex,omitempty"` -} - -func (n *Namespace) UnmarshalJSON(data []byte) error { - type Alias Namespace - aux := struct { - DeletedAtSnake *time.Time `json:"deleted_at"` - *Alias - }{ - Alias: (*Alias)(n), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - if n.DeletedAt == nil && aux.DeletedAtSnake != nil { - n.DeletedAt = aux.DeletedAtSnake - } - - return nil -} - -// NamespaceACLConfig is the Namespace specific ACL configuration container -type NamespaceACLConfig struct { - // PolicyDefaults is the list of policies that should be used for the parent authorizer - // of all tokens in the associated namespace. 
- PolicyDefaults []ACLLink `json:"PolicyDefaults" alias:"policy_defaults"` - // RoleDefaults is the list of roles that should be used for the parent authorizer - // of all tokens in the associated namespace. - RoleDefaults []ACLLink `json:"RoleDefaults" alias:"role_defaults"` -} - -func (n *NamespaceACLConfig) UnmarshalJSON(data []byte) error { - type Alias NamespaceACLConfig - aux := struct { - PolicyDefaultsSnake []ACLLink `json:"policy_defaults"` - RoleDefaultsSnake []ACLLink `json:"role_defaults"` - *Alias - }{ - Alias: (*Alias)(n), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - if n.PolicyDefaults == nil { - for _, pd := range aux.PolicyDefaultsSnake { - n.PolicyDefaults = append(n.PolicyDefaults, pd) - } - } - if n.RoleDefaults == nil { - for _, pd := range aux.RoleDefaultsSnake { - n.RoleDefaults = append(n.RoleDefaults, pd) - } - } - return nil -} - -// Namespaces can be used to manage Namespaces in Consul Enterprise.. -type Namespaces struct { - c *Client -} - -// Namespaces returns a handle to the namespaces endpoints. 
-func (c *Client) Namespaces() *Namespaces { - return &Namespaces{c} -} - -func (n *Namespaces) Create(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) { - if ns.Name == "" { - return nil, nil, fmt.Errorf("Must specify a Name for Namespace creation") - } - - r := n.c.newRequest("PUT", "/v1/namespace") - r.setWriteOptions(q) - r.obj = ns - rtt, resp, err := n.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out Namespace - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -func (n *Namespaces) Update(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) { - if ns.Name == "" { - return nil, nil, fmt.Errorf("Must specify a Name for Namespace updating") - } - - r := n.c.newRequest("PUT", "/v1/namespace/"+ns.Name) - r.setWriteOptions(q) - r.obj = ns - rtt, resp, err := n.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out Namespace - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -func (n *Namespaces) Read(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) { - var out Namespace - r := n.c.newRequest("GET", "/v1/namespace/"+name) - r.setQueryOptions(q) - rtt, resp, err := n.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -func (n *Namespaces) Delete(name string, q 
*WriteOptions) (*WriteMeta, error) { - r := n.c.newRequest("DELETE", "/v1/namespace/"+name) - r.setWriteOptions(q) - rtt, resp, err := n.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) { - var out []*Namespace - r := n.c.newRequest("GET", "/v1/namespaces") - r.setQueryOptions(q) - rtt, resp, err := n.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go deleted file mode 100644 index 667dcd87233..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// Operator can be used to perform low-level operator tasks for Consul. -type Operator struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. -func (c *Client) Operator() *Operator { - return &Operator{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go deleted file mode 100644 index 9228d89b47c..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// The /v1/operator/area endpoints are available only in Consul Enterprise and -// interact with its network area subsystem. 
Network areas are used to link -// together Consul servers in different Consul datacenters. With network areas, -// Consul datacenters can be linked together in ways other than a fully-connected -// mesh, as is required for Consul's WAN. - -import ( - "net" - "time" -) - -// Area defines a network area. -type Area struct { - // ID is this identifier for an area (a UUID). This must be left empty - // when creating a new area. - ID string - - // PeerDatacenter is the peer Consul datacenter that will make up the - // other side of this network area. Network areas always involve a pair - // of datacenters: the datacenter where the area was created, and the - // peer datacenter. This is required. - PeerDatacenter string - - // RetryJoin specifies the address of Consul servers to join to, such as - // an IPs or hostnames with an optional port number. This is optional. - RetryJoin []string - - // UseTLS specifies whether gossip over this area should be encrypted with TLS - // if possible. - UseTLS bool -} - -// AreaJoinResponse is returned when a join occurs and gives the result for each -// address. -type AreaJoinResponse struct { - // The address that was joined. - Address string - - // Whether or not the join was a success. - Joined bool - - // If we couldn't join, this is the message with information. - Error string -} - -// SerfMember is a generic structure for reporting information about members in -// a Serf cluster. This is only used by the area endpoints right now, but this -// could be expanded to other endpoints in the future. -type SerfMember struct { - // ID is the node identifier (a UUID). - ID string - - // Name is the node name. - Name string - - // Addr has the IP address. - Addr net.IP - - // Port is the RPC port. - Port uint16 - - // Datacenter is the DC name. - Datacenter string - - // Role is "client", "server", or "unknown". - Role string - - // Build has the version of the Consul agent. 
- Build string - - // Protocol is the protocol of the Consul agent. - Protocol int - - // Status is the Serf health status "none", "alive", "leaving", "left", - // or "failed". - Status string - - // RTT is the estimated round trip time from the server handling the - // request to the this member. This will be negative if no RTT estimate - // is available. - RTT time.Duration -} - -// AreaCreate will create a new network area. The ID in the given structure must -// be empty and a generated ID will be returned on success. -func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("POST", "/v1/operator/area") - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := op.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaUpdate will update the configuration of the network area with the given ID. -func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := op.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaGet returns a single network area. 
-func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaList returns all the available network areas. -func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaDelete deletes the given network area. -func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { - r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - rtt, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// AreaJoin attempts to join the given set of join addresses to the given -// network area. See the Area structure for details about join addresses. -func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") - r.setWriteOptions(q) - r.obj = addresses - rtt, resp, err := op.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out []*AreaJoinResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, wm, nil -} - -// AreaMembers lists the Serf information about the members in the given area. 
-func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { - var out []*SerfMember - qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_audit.go b/vendor/github.com/hashicorp/consul/api/operator_audit.go deleted file mode 100644 index 5240d38a70d..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_audit.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// The /v1/operator/audit-hash endpoint is available only in Consul Enterprise and -// interact with its audit logging subsystem. - -package api - -type AuditHashRequest struct { - Input string -} - -type AuditHashResponse struct { - Hash string -} - -func (op *Operator) AuditHash(a *AuditHashRequest, q *QueryOptions) (*AuditHashResponse, error) { - r := op.c.newRequest("POST", "/v1/operator/audit-hash") - r.setQueryOptions(q) - r.obj = a - - rtt, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out AuditHashResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go deleted file mode 100644 index 7628bf6f2ff..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// AutopilotConfiguration is used for querying/setting the Autopilot configuration. 
-// Autopilot helps manage operator tasks related to Consul servers like removing -// failed servers from the Raft quorum. -type AutopilotConfiguration struct { - // CleanupDeadServers controls whether to remove dead servers from the Raft - // peer list when a new server joins - CleanupDeadServers bool - - // LastContactThreshold is the limit on the amount of time a server can go - // without leader contact before being considered unhealthy. - LastContactThreshold *ReadableDuration - - // MaxTrailingLogs is the amount of entries in the Raft Log that a server can - // be behind before being considered unhealthy. - MaxTrailingLogs uint64 - - // MinQuorum sets the minimum number of servers allowed in a cluster before - // autopilot can prune dead servers. - MinQuorum uint - - // ServerStabilizationTime is the minimum amount of time a server must be - // in a stable, healthy state before it can be added to the cluster. Only - // applicable with Raft protocol version 3 or higher. - ServerStabilizationTime *ReadableDuration - - // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating - // servers into zones for redundancy. If left blank, this feature will be disabled. - RedundancyZoneTag string - - // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration - // strategy of waiting until enough newer-versioned servers have been added to the - // cluster before promoting them to voters. - DisableUpgradeMigration bool - - // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when - // performing upgrade migrations. If left blank, the Consul version will be used. - UpgradeVersionTag string - - // CreateIndex holds the index corresponding the creation of this configuration. - // This is a read-only field. - CreateIndex uint64 - - // ModifyIndex will be set to the index of the last update when retrieving the - // Autopilot configuration. 
Resubmitting a configuration with - // AutopilotCASConfiguration will perform a check-and-set operation which ensures - // there hasn't been a subsequent update since the configuration was retrieved. - ModifyIndex uint64 -} - -// Defines default values for the AutopilotConfiguration type, consistent with -// https://www.consul.io/api-docs/operator/autopilot#parameters-1 -func NewAutopilotConfiguration() AutopilotConfiguration { - cfg := AutopilotConfiguration{ - CleanupDeadServers: true, - LastContactThreshold: NewReadableDuration(200 * time.Millisecond), - MaxTrailingLogs: 250, - MinQuorum: 0, - ServerStabilizationTime: NewReadableDuration(10 * time.Second), - RedundancyZoneTag: "", - DisableUpgradeMigration: false, - UpgradeVersionTag: "", - } - - return cfg -} - -// ServerHealth is the health (from the leader's point of view) of a server. -type ServerHealth struct { - // ID is the raft ID of the server. - ID string - - // Name is the node name of the server. - Name string - - // Address is the address of the server. - Address string - - // The status of the SerfHealth check for the server. - SerfStatus string - - // Version is the Consul version of the server. - Version string - - // Leader is whether this server is currently the leader. - Leader bool - - // LastContact is the time since this node's last contact with the leader. - LastContact *ReadableDuration - - // LastTerm is the highest leader term this server has a record of in its Raft log. - LastTerm uint64 - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 - - // Healthy is whether or not the server is healthy according to the current - // Autopilot config. - Healthy bool - - // Voter is whether this is a voting server. - Voter bool - - // StableSince is the last time this server's Healthy value changed. 
- StableSince time.Time -} - -// OperatorHealthReply is a representation of the overall health of the cluster -type OperatorHealthReply struct { - // Healthy is true if all the servers in the cluster are healthy. - Healthy bool - - // FailureTolerance is the number of healthy servers that could be lost without - // an outage occurring. - FailureTolerance int - - // Servers holds the health of each server. - Servers []ServerHealth -} - -type AutopilotState struct { - Healthy bool - FailureTolerance int - OptimisticFailureTolerance int - - Servers map[string]AutopilotServer - Leader string - Voters []string - ReadReplicas []string `json:",omitempty"` - RedundancyZones map[string]AutopilotZone `json:",omitempty"` - Upgrade *AutopilotUpgrade `json:",omitempty"` -} - -type AutopilotServer struct { - ID string - Name string - Address string - NodeStatus string - Version string - LastContact *ReadableDuration - LastTerm uint64 - LastIndex uint64 - Healthy bool - StableSince time.Time - RedundancyZone string `json:",omitempty"` - UpgradeVersion string `json:",omitempty"` - ReadReplica bool - Status AutopilotServerStatus - Meta map[string]string - NodeType AutopilotServerType -} - -type AutopilotServerStatus string - -const ( - AutopilotServerNone AutopilotServerStatus = "none" - AutopilotServerLeader AutopilotServerStatus = "leader" - AutopilotServerVoter AutopilotServerStatus = "voter" - AutopilotServerNonVoter AutopilotServerStatus = "non-voter" - AutopilotServerStaging AutopilotServerStatus = "staging" -) - -type AutopilotServerType string - -const ( - AutopilotTypeVoter AutopilotServerType = "voter" - AutopilotTypeReadReplica AutopilotServerType = "read-replica" - AutopilotTypeZoneVoter AutopilotServerType = "zone-voter" - AutopilotTypeZoneExtraVoter AutopilotServerType = "zone-extra-voter" - AutopilotTypeZoneStandby AutopilotServerType = "zone-standby" -) - -type AutopilotZone struct { - Servers []string - Voters []string - FailureTolerance int -} - -type 
AutopilotZoneUpgradeVersions struct { - TargetVersionVoters []string `json:",omitempty"` - TargetVersionNonVoters []string `json:",omitempty"` - OtherVersionVoters []string `json:",omitempty"` - OtherVersionNonVoters []string `json:",omitempty"` -} - -type AutopilotUpgrade struct { - Status AutopilotUpgradeStatus - TargetVersion string `json:",omitempty"` - TargetVersionVoters []string `json:",omitempty"` - TargetVersionNonVoters []string `json:",omitempty"` - TargetVersionReadReplicas []string `json:",omitempty"` - OtherVersionVoters []string `json:",omitempty"` - OtherVersionNonVoters []string `json:",omitempty"` - OtherVersionReadReplicas []string `json:",omitempty"` - RedundancyZones map[string]AutopilotZoneUpgradeVersions `json:",omitempty"` -} - -type AutopilotUpgradeStatus string - -const ( - // AutopilotUpgradeIdle is the status when no upgrade is in progress. - AutopilotUpgradeIdle AutopilotUpgradeStatus = "idle" - - // AutopilotUpgradeAwaitNewVoters is the status when more servers of - // the target version must be added in order to start the promotion - // phase of the upgrade - AutopilotUpgradeAwaitNewVoters AutopilotUpgradeStatus = "await-new-voters" - - // AutopilotUpgradePromoting is the status when autopilot is promoting - // servers of the target version. - AutopilotUpgradePromoting AutopilotUpgradeStatus = "promoting" - - // AutopilotUpgradeDemoting is the status when autopilot is demoting - // servers not on the target version - AutopilotUpgradeDemoting AutopilotUpgradeStatus = "demoting" - - // AutopilotUpgradeLeaderTransfer is the status when autopilot is transferring - // leadership from a server running an older version to a server - // using the target version. 
- AutopilotUpgradeLeaderTransfer AutopilotUpgradeStatus = "leader-transfer" - - // AutopilotUpgradeAwaitNewServers is the status when autpilot has finished - // transferring leadership and has demoted all the other versioned - // servers but wants to indicate that more target version servers - // are needed to replace all the existing other version servers. - AutopilotUpgradeAwaitNewServers AutopilotUpgradeStatus = "await-new-servers" - - // AutopilotUpgradeAwaitServerRemoval is the status when autopilot is waiting - // for the servers on non-target versions to be removed - AutopilotUpgradeAwaitServerRemoval AutopilotUpgradeStatus = "await-server-removal" - - // AutopilotUpgradeDisabled is the status when automated ugprades are - // disabled in the autopilot configuration - AutopilotUpgradeDisabled AutopilotUpgradeStatus = "disabled" -) - -// ReadableDuration is a duration type that is serialized to JSON in human readable format. -type ReadableDuration time.Duration - -func NewReadableDuration(dur time.Duration) *ReadableDuration { - d := ReadableDuration(dur) - return &d -} - -func (d *ReadableDuration) String() string { - return d.Duration().String() -} - -func (d *ReadableDuration) Duration() time.Duration { - if d == nil { - return time.Duration(0) - } - return time.Duration(*d) -} - -func (d *ReadableDuration) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil -} - -func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) { - if d == nil { - return fmt.Errorf("cannot unmarshal to nil pointer") - } - - var dur time.Duration - str := string(raw) - if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' { - // quoted string - dur, err = time.ParseDuration(str[1 : len(str)-1]) - if err != nil { - return err - } - } else { - // no quotes, not a string - v, err := strconv.ParseFloat(str, 64) - if err != nil { - return err - } - dur = time.Duration(v) - } - - *d = ReadableDuration(dur) - return nil -} - -// 
AutopilotGetConfiguration is used to query the current Autopilot configuration. -func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out AutopilotConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} - -// AutopilotSetConfiguration is used to set the current Autopilot configuration. -func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.obj = conf - _, resp, err := op.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// AutopilotCASConfiguration is used to perform a Check-And-Set update on the -// Autopilot configuration. The ModifyIndex value will be respected. Returns -// true on success or false on failures. 
-func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) - r.obj = conf - _, resp, err := op.c.doRequest(r) - if err != nil { - return false, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return false, err - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - - return res, nil -} - -// AutopilotServerHealth -func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/health") - r.setQueryOptions(q) - - // we use 429 status to indicate unhealthiness - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - err = requireHttpCodes(resp, 200, 429) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - - var out OperatorHealthReply - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -func (op *Operator) AutopilotState(q *QueryOptions) (*AutopilotState, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/state") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out AutopilotState - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go deleted file mode 100644 index aefec9e2704..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ 
/dev/null @@ -1,110 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// keyringRequest is used for performing Keyring operations -type keyringRequest struct { - Key string -} - -// KeyringResponse is returned when listing the gossip encryption keys -type KeyringResponse struct { - // Whether this response is for a WAN ring - WAN bool - - // The datacenter name this request corresponds to - Datacenter string - - // Segment has the network segment this request corresponds to. - Segment string - - // Partition has the admin partition this request corresponds to. - Partition string `json:",omitempty"` - - // Messages has information or errors from serf - Messages map[string]string `json:",omitempty"` - - // A map of the encryption keys to the number of nodes they're installed on - Keys map[string]int - - // A map of the encryption primary keys to the number of nodes they're installed on - PrimaryKeys map[string]int - - // The total number of nodes in this ring - NumNodes int -} - -// KeyringInstall is used to install a new gossip encryption key into the cluster -func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { - r := op.c.newRequest("POST", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := op.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// KeyringList is used to list the gossip keys installed in the cluster -func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { - r := op.c.newRequest("GET", "/v1/operator/keyring") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out []*KeyringResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } 
- return out, nil -} - -// KeyringRemove is used to remove a gossip encryption key from the cluster -func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := op.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// KeyringUse is used to change the active gossip encryption key -func (op *Operator) KeyringUse(key string, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := op.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go deleted file mode 100644 index 1e3496da0e1..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_license.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "io" - "strings" - "time" -) - -type License struct { - // The unique identifier of the license - LicenseID string `json:"license_id"` - - // The customer ID associated with the license - CustomerID string `json:"customer_id"` - - // If set, an identifier that should be used to lock the license to a - // particular site, cluster, etc. 
- InstallationID string `json:"installation_id"` - - // The time at which the license was issued - IssueTime time.Time `json:"issue_time"` - - // The time at which the license starts being valid - StartTime time.Time `json:"start_time"` - - // The time after which the license expires - ExpirationTime time.Time `json:"expiration_time"` - - // The time at which the license ceases to function and can - // no longer be used in any capacity - TerminationTime time.Time `json:"termination_time"` - - // Whether the license will ignore termination - IgnoreTermination bool `json:"ignore_termination"` - - // The product the license is valid for - Product string `json:"product"` - - // License Specific Flags - Flags map[string]interface{} `json:"flags"` - - // Modules is a list of the licensed enterprise modules - Modules []string `json:"modules"` - - // List of features enabled by the license - Features []string `json:"features"` -} - -type LicenseReply struct { - Valid bool - License *License - Warnings []string -} - -func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) { - var reply LicenseReply - if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil { - return nil, err - } else { - return &reply, nil - } -} - -func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) { - r := op.c.newRequest("GET", "/v1/operator/license") - r.params.Set("signed", "1") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return "", err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", err - } - - data, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return string(data), nil -} - -// LicenseReset will reset the license to the builtin one if it is still valid. -// If the builtin license is invalid, the current license stays active. 
-// -// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses -// are now set via agent configuration instead of through the API -func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) { - var reply LicenseReply - r := op.c.newRequest("DELETE", "/v1/operator/license") - r.setWriteOptions(opts) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - if err := decodeBody(resp, &reply); err != nil { - return nil, err - } - return &reply, nil -} - -// LicensePut will configure the Consul Enterprise license for the target datacenter -// -// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses -// are now set via agent configuration instead of through the API -func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) { - var reply LicenseReply - r := op.c.newRequest("PUT", "/v1/operator/license") - r.setWriteOptions(opts) - r.body = strings.NewReader(license) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - if err := decodeBody(resp, &reply); err != nil { - return nil, err - } - - return &reply, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go deleted file mode 100644 index d72c00c97b9..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// RaftServer has information about a server in the Raft configuration. -type RaftServer struct { - // ID is the unique ID for the server. These are currently the same - // as the address, but they will be changed to a real GUID in a future - // release of Consul. 
- ID string - - // Node is the node name of the server, as known by Consul, or this - // will be set to "(unknown)" otherwise. - Node string - - // Address is the IP:port of the server, used for Raft communications. - Address string - - // Leader is true if this server is the current cluster leader. - Leader bool - - // Protocol version is the raft protocol version used by the server - ProtocolVersion string - - // Voter is true if this server has a vote in the cluster. This might - // be false if the server is staging and still coming online, or if - // it's a non-voting server, which will be added in a future release of - // Consul. - Voter bool - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 -} - -// RaftConfiguration is returned when querying for the current Raft configuration. -type RaftConfiguration struct { - // Servers has the list of servers in the Raft configuration. - Servers []*RaftServer - - // Index has the Raft index of this configuration. - Index uint64 -} - -// TransferLeaderResponse is returned when querying for the current Raft configuration. -type TransferLeaderResponse struct { - Success bool -} - -// RaftGetConfiguration is used to query the current Raft peer set. 
-func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/raft/configuration") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out RaftConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// RaftLeaderTransfer is used to transfer the current raft leader to another node -func (op *Operator) RaftLeaderTransfer(q *QueryOptions) (*TransferLeaderResponse, error) { - r := op.c.newRequest("POST", "/v1/operator/raft/transfer-leader") - r.setQueryOptions(q) - _, resp, err := op.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var out TransferLeaderResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by address in the form of -// "IP:port". -func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("address", address) - - _, resp, err := op.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} - -// RaftRemovePeerByID is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by ID. 
-func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("id", id) - - _, resp, err := op.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go deleted file mode 100644 index 6115a7ab4b5..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_segment.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// SegmentList returns all the available LAN segments. -func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) { - var out []string - qm, err := op.c.query("/v1/operator/segment", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_usage.go b/vendor/github.com/hashicorp/consul/api/operator_usage.go deleted file mode 100644 index 8977449ddd3..00000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_usage.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -type Usage struct { - // Usage is a map of datacenter -> usage information - Usage map[string]ServiceUsage -} - -// ServiceUsage contains information about the number of services and service instances for a datacenter. -type ServiceUsage struct { - Nodes int - Services int - ServiceInstances int - ConnectServiceInstances map[string]int - - // Billable services are of "typical" service kind (i.e. non-connect or connect-native), - // excluding the "consul" service. 
- BillableServiceInstances int - - // A map of partition+namespace to number of unique services registered in that namespace - PartitionNamespaceServices map[string]map[string]int - - // A map of partition+namespace to number of service instances registered in that namespace - PartitionNamespaceServiceInstances map[string]map[string]int - - // A map of partition+namespace+kind to number of service-mesh instances registered in that namespace - PartitionNamespaceConnectServiceInstances map[string]map[string]map[string]int - - // A map of partition+namespace to number of billable instances registered in that namespace - PartitionNamespaceBillableServiceInstances map[string]map[string]int -} - -// Usage is used to query for usage information in the given datacenter. -func (op *Operator) Usage(q *QueryOptions) (*Usage, *QueryMeta, error) { - r := op.c.newRequest("GET", "/v1/operator/usage") - r.setQueryOptions(q) - rtt, resp, err := op.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *Usage - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/partition.go b/vendor/github.com/hashicorp/consul/api/partition.go deleted file mode 100644 index 8467c311896..00000000000 --- a/vendor/github.com/hashicorp/consul/api/partition.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "fmt" - "time" -) - -// Partition is the configuration of a single admin partition. Admin Partitions are a Consul Enterprise feature. -type Partition struct { - // Name is the name of the Partition. 
- Name string `json:"Name"` - - // Description is where the user puts any information they want - // about the admin partition. It is not used internally. - Description string `json:"Description,omitempty"` - - // DeletedAt is the time when the Partition was marked for deletion - // This is nullable so that we can omit if empty when encoding in JSON - DeletedAt *time.Time `json:"DeletedAt,omitempty" alias:"deleted_at"` - - // CreateIndex is the Raft index at which the Partition was created - CreateIndex uint64 `json:"CreateIndex,omitempty"` - - // ModifyIndex is the latest Raft index at which the Partition was modified. - ModifyIndex uint64 `json:"ModifyIndex,omitempty"` -} - -// PartitionDefaultName is the default partition value. -const PartitionDefaultName = "default" - -// Partitions can be used to manage Partitions in Consul Enterprise. -type Partitions struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. -func (c *Client) Partitions() *Partitions { - return &Partitions{c} -} - -func (p *Partitions) Create(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { - if partition.Name == "" { - return nil, nil, fmt.Errorf("Must specify a Name for Partition creation") - } - - r := p.c.newRequest("PUT", "/v1/partition") - r.setWriteOptions(q) - r.ctx = ctx - r.obj = partition - rtt, resp, err := p.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out Partition - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -func (p *Partitions) Update(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { - if partition.Name == "" { - return nil, nil, fmt.Errorf("Must specify a Name for Partition updating") - } - - r := p.c.newRequest("PUT", 
"/v1/partition/"+partition.Name) - r.setWriteOptions(q) - r.ctx = ctx - r.obj = partition - rtt, resp, err := p.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - var out Partition - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -func (p *Partitions) Read(ctx context.Context, name string, q *QueryOptions) (*Partition, *QueryMeta, error) { - var out Partition - r := p.c.newRequest("GET", "/v1/partition/"+name) - r.setQueryOptions(q) - r.ctx = ctx - rtt, resp, err := p.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -func (p *Partitions) Delete(ctx context.Context, name string, q *WriteOptions) (*WriteMeta, error) { - r := p.c.newRequest("DELETE", "/v1/partition/"+name) - r.setWriteOptions(q) - r.ctx = ctx - rtt, resp, err := p.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -func (p *Partitions) List(ctx context.Context, q *QueryOptions) ([]*Partition, *QueryMeta, error) { - var out []*Partition - r := p.c.newRequest("GET", "/v1/partitions") - r.setQueryOptions(q) - r.ctx = ctx - rtt, resp, err := p.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := 
decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/peering.go b/vendor/github.com/hashicorp/consul/api/peering.go deleted file mode 100644 index dd7780f630a..00000000000 --- a/vendor/github.com/hashicorp/consul/api/peering.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "fmt" - "time" -) - -// PeeringState enumerates all the states a peering can be in -type PeeringState string - -const ( - // PeeringStateUndefined represents an unset value for PeeringState during - // writes. - PeeringStateUndefined PeeringState = "UNDEFINED" - - // PeeringStatePending means the peering was created by generating a peering token. - // Peerings stay in a pending state until the peer uses the token to dial - // the local cluster. - PeeringStatePending PeeringState = "PENDING" - - // PeeringStateEstablishing means the peering is being established from a peering token. - // This is the initial state for dialing peers. - PeeringStateEstablishing PeeringState = "ESTABLISHING" - - // PeeringStateActive means that the peering connection is active and - // healthy. - PeeringStateActive PeeringState = "ACTIVE" - - // PeeringStateFailing means the peering connection has been interrupted - // but has not yet been terminated. - PeeringStateFailing PeeringState = "FAILING" - - // PeeringStateDeleting means a peering was marked for deletion and is in the process - // of being deleted. - PeeringStateDeleting PeeringState = "DELETING" - - // PeeringStateTerminated means the peering relationship has been removed. - PeeringStateTerminated PeeringState = "TERMINATED" -) - -type PeeringRemoteInfo struct { - // Partition is the remote peer's partition. - Partition string - // Datacenter is the remote peer's datacenter. 
- Datacenter string - Locality *Locality `json:",omitempty"` -} - -// Locality identifies where a given entity is running. -type Locality struct { - // Region is region the zone belongs to. - Region string - - // Zone is the zone the entity is running in. - Zone string -} - -type Peering struct { - // ID is a datacenter-scoped UUID for the peering. - ID string - // Name is the local alias for the peering relationship. - Name string - // Partition is the local partition connecting to the peer. - Partition string `json:",omitempty"` - // DeletedAt is the time when the Peering was marked for deletion - DeletedAt *time.Time `json:",omitempty" alias:"deleted_at"` - // Meta is a mapping of some string value to any other string value - Meta map[string]string `json:",omitempty"` - // State is one of the valid PeeringState values to represent the status of - // peering relationship. - State PeeringState - // PeerID is the ID that our peer assigned to this peering. This ID is to - // be used when dialing the peer, so that it can know who dialed it. - PeerID string `json:",omitempty"` - // PeerCAPems contains all the CA certificates for the remote peer. - PeerCAPems []string `json:",omitempty"` - // PeerServerName is the name of the remote server as it relates to TLS. - PeerServerName string `json:",omitempty"` - // PeerServerAddresses contains all the connection addresses for the remote peer. - PeerServerAddresses []string `json:",omitempty"` - // StreamStatus contains information computed on read based on the state of the stream. - StreamStatus PeeringStreamStatus - // CreateIndex is the Raft index at which the Peering was created. - CreateIndex uint64 - // ModifyIndex is the latest Raft index at which the Peering was modified. - ModifyIndex uint64 - // Remote contains metadata for the remote peer. - Remote PeeringRemoteInfo -} - -type PeeringStreamStatus struct { - // ImportedServices is the list of services imported from this peering. 
- ImportedServices []string - // ExportedServices is the list of services exported to this peering. - ExportedServices []string - // LastHeartbeat represents when the last heartbeat message was received. - LastHeartbeat *time.Time - // LastReceive represents when any message was last received, regardless of success or error. - LastReceive *time.Time - // LastSend represents when any message was last sent, regardless of success or error. - LastSend *time.Time -} - -type PeeringReadResponse struct { - Peering *Peering -} - -type PeeringGenerateTokenRequest struct { - // PeerName is the name of the remote peer. - PeerName string - // Partition to be peered. - Partition string `json:",omitempty"` - // Meta is a mapping of some string value to any other string value - Meta map[string]string `json:",omitempty"` - // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify - // load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server - // addresses obtained from the "consul" service. - ServerExternalAddresses []string `json:",omitempty"` -} - -type PeeringGenerateTokenResponse struct { - // PeeringToken is an opaque string provided to the remote peer for it to complete - // the peering initialization handshake. - PeeringToken string -} - -type PeeringEstablishRequest struct { - // Name of the remote peer. - PeerName string - // The peering token returned from the peer's GenerateToken endpoint. - PeeringToken string `json:",omitempty"` - // Partition to be peered. - Partition string `json:",omitempty"` - // Meta is a mapping of some string value to any other string value - Meta map[string]string `json:",omitempty"` -} - -type PeeringEstablishResponse struct { -} - -type PeeringListRequest struct { - // future proofing in case we extend List functionality -} - -type Peerings struct { - c *Client -} - -// Peerings returns a handle to the operator endpoints. 
-func (c *Client) Peerings() *Peerings { - return &Peerings{c: c} -} - -func (p *Peerings) Read(ctx context.Context, name string, q *QueryOptions) (*Peering, *QueryMeta, error) { - if name == "" { - return nil, nil, fmt.Errorf("peering name cannot be empty") - } - - req := p.c.newRequest("GET", fmt.Sprintf("/v1/peering/%s", name)) - req.setQueryOptions(q) - req.ctx = ctx - - rtt, resp, err := p.c.doRequest(req) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - found, resp, err := requireNotFoundOrOK(resp) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if !found { - return nil, qm, nil - } - - var out Peering - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, qm, nil -} - -func (p *Peerings) Delete(ctx context.Context, name string, q *WriteOptions) (*WriteMeta, error) { - if name == "" { - return nil, fmt.Errorf("peering name cannot be empty") - } - - req := p.c.newRequest("DELETE", fmt.Sprintf("/v1/peering/%s", name)) - req.setWriteOptions(q) - req.ctx = ctx - - rtt, resp, err := p.c.doRequest(req) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// TODO(peering): verify this is the ultimate signature we want -func (p *Peerings) GenerateToken(ctx context.Context, g PeeringGenerateTokenRequest, wq *WriteOptions) (*PeeringGenerateTokenResponse, *WriteMeta, error) { - if g.PeerName == "" { - return nil, nil, fmt.Errorf("peer name cannot be empty") - } - - req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/token")) - req.setWriteOptions(wq) - req.ctx = ctx - req.obj = g - - rtt, resp, err := p.c.doRequest(req) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := 
&WriteMeta{RequestTime: rtt} - - var out PeeringGenerateTokenResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// TODO(peering): verify this is the ultimate signature we want -func (p *Peerings) Establish(ctx context.Context, i PeeringEstablishRequest, wq *WriteOptions) (*PeeringEstablishResponse, *WriteMeta, error) { - req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/establish")) - req.setWriteOptions(wq) - req.ctx = ctx - req.obj = i - - rtt, resp, err := p.c.doRequest(req) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - - var out PeeringEstablishResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -func (p *Peerings) List(ctx context.Context, q *QueryOptions) ([]*Peering, *QueryMeta, error) { - req := p.c.newRequest("GET", "/v1/peerings") - req.setQueryOptions(q) - req.ctx = ctx - - rtt, resp, err := p.c.doRequest(req) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Peering - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go deleted file mode 100644 index 8ebc852f3ac..00000000000 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// QueryFailoverOptions sets options about how we fail over if there are no -// healthy nodes in the local datacenter. 
-type QueryFailoverOptions struct { - // NearestN is set to the number of remote datacenters to try, based on - // network coordinates. - NearestN int - - // Datacenters is a fixed list of datacenters to try after NearestN. We - // never try a datacenter multiple times, so those are subtracted from - // this list before proceeding. - Datacenters []string - - // Targets is a fixed list of datacenters and peers to try. This field cannot - // be populated with NearestN or Datacenters. - Targets []QueryFailoverTarget -} - -// Deprecated: use QueryFailoverOptions instead. -type QueryDatacenterOptions = QueryFailoverOptions - -type QueryFailoverTarget struct { - // Peer specifies a peer to try during failover. - Peer string - - // Datacenter specifies a datacenter to try during failover. - Datacenter string - - // Partition specifies a partition to try during failover - // Note: Partition are available only in Consul Enterprise - Partition string `json:",omitempty"` - - // Namespace specifies a namespace to try during failover - // Note: Namespaces are available only in Consul Enterprise - Namespace string `json:",omitempty"` -} - -// QueryDNSOptions controls settings when query results are served over DNS. -type QueryDNSOptions struct { - // TTL is the time to live for the served DNS results. - TTL string -} - -// ServiceQuery is used to query for a set of healthy nodes offering a specific -// service. -type ServiceQuery struct { - // Service is the service to query. - Service string - - // SamenessGroup specifies a sameness group to query. The first member of the Sameness Group will - // be targeted first on PQ execution and subsequent members will be targeted during failover scenarios. - // This field is mutually exclusive with Failover. 
- SamenessGroup string `json:",omitempty"` - - // Namespace of the service to query - Namespace string `json:",omitempty"` - - // Partition of the service to query - Partition string `json:",omitempty"` - - // Near allows baking in the name of a node to automatically distance- - // sort from. The magic "_agent" value is supported, which sorts near - // the agent which initiated the request by default. - Near string - - // Failover controls what we do if there are no healthy nodes in the - // local datacenter. - Failover QueryFailoverOptions `json:",omitempty"` - - // IgnoreCheckIDs is an optional list of health check IDs to ignore when - // considering which nodes are healthy. It is useful as an emergency measure - // to temporarily override some health check that is producing false negatives - // for example. - IgnoreCheckIDs []string - - // If OnlyPassing is true then we will only include nodes with passing - // health checks (critical AND warning checks will cause a node to be - // discarded) - OnlyPassing bool - - // Tags are a set of required and/or disallowed tags. If a tag is in - // this list it must be present. If the tag is preceded with "!" then - // it is disallowed. - Tags []string - - // NodeMeta is a map of required node metadata fields. If a key/value - // pair is in this map it must be present on the node in order for the - // service entry to be returned. - NodeMeta map[string]string - - // ServiceMeta is a map of required service metadata fields. If a key/value - // pair is in this map it must be present on the node in order for the - // service entry to be returned. - ServiceMeta map[string]string - - // Connect if true will filter the prepared query results to only - // include Connect-capable services. These include both native services - // and proxies for matching services. Note that if a proxy matches, - // the constraints in the query above (Near, OnlyPassing, etc.) apply - // to the _proxy_ and not the service being proxied. 
In practice, proxies - // should be directly next to their services so this isn't an issue. - Connect bool -} - -// QueryTemplate carries the arguments for creating a templated query. -type QueryTemplate struct { - // Type specifies the type of the query template. Currently only - // "name_prefix_match" is supported. This field is required. - Type string - - // Regexp allows specifying a regex pattern to match against the name - // of the query being executed. - Regexp string - - // RemoveEmptyTags if set to true, will cause the Tags list inside - // the Service structure to be stripped of any empty strings. This is useful - // when interpolating into tags in a way where the tag is optional, and - // where searching for an empty tag would yield no results from the query. - RemoveEmptyTags bool -} - -// PreparedQueryDefinition defines a complete prepared query. -type PreparedQueryDefinition struct { - // ID is this UUID-based ID for the query, always generated by Consul. - ID string - - // Name is an optional friendly name for the query supplied by the - // user. NOTE - if this feature is used then it will reduce the security - // of any read ACL associated with this query/service since this name - // can be used to locate nodes with supplying any ACL. - Name string - - // Session is an optional session to tie this query's lifetime to. If - // this is omitted then the query will not expire. - Session string - - // Token is the ACL token used when the query was created, and it is - // used when a query is subsequently executed. This token, or a token - // with management privileges, must be used to change the query later. - Token string - - // Service defines a service query (leaving things open for other types - // later). - Service ServiceQuery - - // DNS has options that control how the results of this query are - // served over DNS. 
- DNS QueryDNSOptions - - // Template is used to pass through the arguments for creating a - // prepared query with an attached template. If a template is given, - // interpolations are possible in other struct fields. - Template QueryTemplate -} - -// PreparedQueryExecuteResponse has the results of executing a query. -type PreparedQueryExecuteResponse struct { - // Service is the service that was queried. - Service string - - // Namespace of the service that was queried - Namespace string `json:",omitempty"` - - // Nodes has the nodes that were output by the query. - Nodes []ServiceEntry - - // DNS has the options for serving these results over DNS. - DNS QueryDNSOptions - - // Datacenter is the datacenter that these results came from. - Datacenter string - - // Failovers is a count of how many times we had to query a remote - // datacenter. - Failovers int -} - -// PreparedQuery can be used to query the prepared query endpoints. -type PreparedQuery struct { - c *Client -} - -// PreparedQuery returns a handle to the prepared query endpoints. -func (c *Client) PreparedQuery() *PreparedQuery { - return &PreparedQuery{c} -} - -// Create makes a new prepared query. The ID of the new query is returned. -func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { - r := c.c.newRequest("POST", "/v1/query") - r.setWriteOptions(q) - r.obj = query - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return "", nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update makes updates to an existing prepared query. 
-func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { - return c.c.write("/v1/query/"+query.ID, query, nil, q) -} - -// List is used to fetch all the prepared queries (always requires a management -// token). -func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Get is used to fetch a specific prepared query. -func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query/"+queryID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Delete is used to delete a specific prepared query. -func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setWriteOptions(q) - rtt, resp, err := c.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// Execute is used to execute a specific prepared query. You can execute using -// a query ID or name. -func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { - var out *PreparedQueryExecuteResponse - qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go deleted file mode 100644 index 639513d29fa..00000000000 --- a/vendor/github.com/hashicorp/consul/api/raw.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. -func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go deleted file mode 100644 index 9d98ff5c29b..00000000000 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "encoding/json" - "fmt" - "path" - "sync" - "time" -) - -const ( - // DefaultSemaphoreSessionName is the Session Name we assign if none is provided - DefaultSemaphoreSessionName = "Consul API Semaphore" - - // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided - // when creating a new Semaphore. This is used because we do not have another - // other check to depend upon. - DefaultSemaphoreSessionTTL = "15s" - - // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore - // acquisition is possible. This affects the minimum time it takes to cancel - // a Semaphore acquisition. - DefaultSemaphoreWaitTime = 15 * time.Second - - // DefaultSemaphoreKey is the key used within the prefix to - // use for coordination between all the contenders. 
- DefaultSemaphoreKey = ".lock" - - // SemaphoreFlagValue is a magic flag we set to indicate a key - // is being used for a semaphore. It is used to detect a potential - // conflict with a lock. - SemaphoreFlagValue = 0xe0f69a2baa414de0 -) - -var ( - // ErrSemaphoreHeld is returned if we attempt to double lock - ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") - - // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore - // that we do not hold. - ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") - - // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore - // that is in use. - ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") - - // ErrSemaphoreConflict is returned if the flags on a key - // used for a semaphore do not match expectation - ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") -) - -// Semaphore is used to implement a distributed semaphore -// using the Consul KV primitives. -type Semaphore struct { - c *Client - opts *SemaphoreOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// SemaphoreOptions is used to parameterize the Semaphore -type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime - SemaphoreTryOnce bool // Optional, defaults to false which means try forever - Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or 
"default" namespace -} - -// semaphoreLock is written under the DefaultSemaphoreKey and -// is used to coordinate between all the contenders. -type semaphoreLock struct { - // Limit is the integer limit of holders. This is used to - // verify that all the holders agree on the value. - Limit int - - // Holders is a list of all the semaphore holders. - // It maps the session ID to true. It is used as a set effectively. - Holders map[string]bool -} - -// SemaphorePrefix is used to created a Semaphore which will operate -// at the given KV prefix and uses the given limit for the semaphore. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. -func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { - opts := &SemaphoreOptions{ - Prefix: prefix, - Limit: limit, - } - return c.SemaphoreOpts(opts) -} - -// SemaphoreOpts is used to create a Semaphore with the given options. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. If a Session is not provided, one will be created. 
-func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { - if opts.Prefix == "" { - return nil, fmt.Errorf("missing prefix") - } - if opts.Limit <= 0 { - return nil, fmt.Errorf("semaphore limit must be positive") - } - if opts.SessionName == "" { - opts.SessionName = DefaultSemaphoreSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultSemaphoreSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.SemaphoreWaitTime == 0 { - opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime - } - s := &Semaphore{ - c: c, - opts: opts, - } - return s, nil -} - -// Acquire attempts to reserve a slot in the semaphore, blocking until -// success, interrupted via the stopCh or an error is encountered. -// Providing a non-nil stopCh can be used to abort the attempt. -// On success, a channel is returned that represents our slot. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the slot is held until Release() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the session being lost. 
-func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - sess, err := s.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - - // Create the contender entry - kv := s.c.KV() - wOpts := WriteOptions{Namespace: s.opts.Namespace} - - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), &wOpts) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := QueryOptions{ - WaitTime: s.opts.SemaphoreWaitTime, - Namespace: s.opts.Namespace, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if s.opts.SemaphoreTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > s.opts.SemaphoreWaitTime { - return nil, nil - } - - // Query wait time should not exceed the semaphore wait time - qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed - } - attempts++ - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, &qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, &wOpts) - if err != nil { - return nil, fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. 
-func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - - wOpts := WriteOptions{Namespace: s.opts.Namespace} - qOpts := QueryOptions{Namespace: s.opts.Namespace} - -READ: - pair, _, err := kv.Get(key, &qOpts) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, &wOpts) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, &wOpts); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. 
-func (s *Semaphore) Destroy() error { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List for the semaphore - kv := s.c.KV() - - q := QueryOptions{Namespace: s.opts.Namespace} - pairs, _, err := kv.List(s.opts.Prefix, &q) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - w := WriteOptions{Namespace: s.opts.Namespace} - didRemove, _, err := kv.DeleteCAS(lockPair, &w) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - - w := WriteOptions{Namespace: s.opts.Namespace} - id, _, err := session.Create(se, &w) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := 
path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} - -// decodeLock is used to decode a semaphoreLock from an -// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -} - -// monitorLock is a long running routine to monitor a semaphore ownership -// It closes the stopCh if we lose our slot. 
-func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := QueryOptions{ - RequireConsistent: true, - Namespace: s.opts.Namespace, - } -WAIT: - retries := s.opts.MonitorRetries -RETRY: - pairs, meta, err := kv.List(s.opts.Prefix, &opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(s.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index 69fd77d2790..00000000000 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "errors" - "fmt" - "time" -) - -const ( - // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
- SessionBehaviorDelete = "delete" -) - -var ErrSessionExpired = errors.New("session expired") - -// SessionEntry represents a session in consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - LockDelay time.Duration - Behavior string - TTL string - Namespace string `json:",omitempty"` - - // Deprecated for Consul Enterprise in v1.7.0. - Checks []string - - // NodeChecks and ServiceChecks are new in Consul 1.7.0. - // When associating checks with sessions, namespaces can be specified for service checks. - NodeChecks []string - ServiceChecks []ServiceCheck -} - -type ServiceCheck struct { - ID string - Namespace string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. -func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["NodeChecks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) - -} - -// Create makes a new session. Providing a session entry can -// customize the session. It can also be nil to use defaults. 
-func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if len(se.NodeChecks) > 0 { - body["NodeChecks"] = se.NodeChecks - } - if len(se.ServiceChecks) > 0 { - body["ServiceChecks"] = se.ServiceChecks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - r := s.c.newRequest("PUT", "/v1/session/renew/"+id) - r.setWriteOptions(q) - rtt, resp, err := s.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer closeResponseBody(resp) - - wm := &WriteMeta{RequestTime: rtt} - - if resp.StatusCode == 404 { - return nil, wm, nil - } else if resp.StatusCode != 200 { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - - var entries []*SessionEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -} - -// 
RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long running -// goroutine to ensure a session stays valid. -func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { - ctx := q.Context() - - ttl, err := time.ParseDuration(initialTTL) - if err != nil { - return err - } - - waitDur := ttl / 2 - lastRenewTime := time.Now() - var lastErr error - for { - if time.Since(lastRenewTime) > ttl { - return lastErr - } - select { - case <-time.After(waitDur): - entry, _, err := s.Renew(id, q) - if err != nil { - waitDur = time.Second - lastErr = err - continue - } - if entry == nil { - return ErrSessionExpired - } - - // Handle the server updating the TTL - ttl, _ = time.ParseDuration(entry.TTL) - waitDur = ttl / 2 - lastRenewTime = time.Now() - - case <-doneCh: - // Attempt a session destroy - s.Destroy(id, q) - return nil - - case <-ctx.Done(): - // Bail immediately since attempting the destroy would - // use the canceled context in q, which would just bail. 
- return ctx.Err() - } - } -} - -// Info looks up a single session -func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/info/"+id, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List gets sessions for a node -func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/node/"+node, &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// List gets all active sessions -func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/list", &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go deleted file mode 100644 index bcc80e5b3de..00000000000 --- a/vendor/github.com/hashicorp/consul/api/snapshot.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "io" -) - -// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of -// Consul's internal state and restore snapshots for disaster recovery. -type Snapshot struct { - c *Client -} - -// Snapshot returns a handle that exposes the snapshot endpoints. -func (c *Client) Snapshot() *Snapshot { - return &Snapshot{c} -} - -// Save requests a new snapshot and provides an io.ReadCloser with the snapshot -// data to save. If this doesn't return an error, then it's the responsibility -// of the caller to close it. Only a subset of the QueryOptions are supported: -// Datacenter, AllowStale, and Token. 
-func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { - r := s.c.newRequest("GET", "/v1/snapshot") - r.setQueryOptions(q) - - rtt, resp, err := s.c.doRequest(r) - if err != nil { - return nil, nil, err - } - if err := requireOK(resp); err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - return resp.Body, qm, nil -} - -// Restore streams in an existing snapshot and attempts to restore it. -func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { - r := s.c.newRequest("PUT", "/v1/snapshot") - r.body = in - r.header.Set("Content-Type", "application/octet-stream") - r.setWriteOptions(q) - _, resp, err := s.c.doRequest(r) - if err != nil { - return err - } - if err := requireOK(resp); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 8c52eb222bf..00000000000 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) LeaderWithQueryOptions(q *QueryOptions) (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - - if q != nil { - r.setQueryOptions(q) - } - - _, resp, err := s.c.doRequest(r) - if err != nil { - return "", err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return "", err - } - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -func (s *Status) Leader() (string, error) { - return s.LeaderWithQueryOptions(nil) -} - -// Peers is used to query for a known raft peers -func (s *Status) PeersWithQueryOptions(q *QueryOptions) ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - - if q != nil { - r.setQueryOptions(q) - } - - _, resp, err := s.c.doRequest(r) - if err != nil { - return nil, err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return nil, err - } - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} - -func (s *Status) Peers() ([]string, error) { - return s.PeersWithQueryOptions(nil) -} diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go deleted file mode 100644 index 59adafdac3d..00000000000 --- a/vendor/github.com/hashicorp/consul/api/txn.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" -) - -// Txn is used to manipulate the Txn API -type Txn struct { - c *Client -} - -// Txn is used to return a handle to the K/V apis -func (c *Client) Txn() *Txn { - return &Txn{c} -} - -// TxnOp is the internal format we send to Consul. Currently only K/V and -// check operations are supported. -type TxnOp struct { - KV *KVTxnOp - Node *NodeTxnOp - Service *ServiceTxnOp - Check *CheckTxnOp -} - -// TxnOps is a list of transaction operations. -type TxnOps []*TxnOp - -// TxnResult is the internal format we receive from Consul. -type TxnResult struct { - KV *KVPair - Node *Node - Service *CatalogService - Check *HealthCheck -} - -// TxnResults is a list of TxnResult objects. -type TxnResults []*TxnResult - -// TxnError is used to return information about an operation in a transaction. -type TxnError struct { - OpIndex int - What string -} - -// TxnErrors is a list of TxnError objects. -type TxnErrors []*TxnError - -// TxnResponse is the internal format we receive from Consul. -type TxnResponse struct { - Results TxnResults - Errors TxnErrors -} - -// KVOp constants give possible operations available in a transaction. -type KVOp string - -const ( - KVSet KVOp = "set" - KVDelete KVOp = "delete" - KVDeleteCAS KVOp = "delete-cas" - KVDeleteTree KVOp = "delete-tree" - KVCAS KVOp = "cas" - KVLock KVOp = "lock" - KVUnlock KVOp = "unlock" - KVGet KVOp = "get" - KVGetOrEmpty KVOp = "get-or-empty" - KVGetTree KVOp = "get-tree" - KVCheckSession KVOp = "check-session" - KVCheckIndex KVOp = "check-index" - KVCheckNotExists KVOp = "check-not-exists" -) - -// KVTxnOp defines a single operation inside a transaction. 
-type KVTxnOp struct { - Verb KVOp - Key string - Value []byte - Flags uint64 - Index uint64 - Session string - Namespace string `json:",omitempty"` - Partition string `json:",omitempty"` -} - -// KVTxnOps defines a set of operations to be performed inside a single -// transaction. -type KVTxnOps []*KVTxnOp - -// KVTxnResponse has the outcome of a transaction. -type KVTxnResponse struct { - Results []*KVPair - Errors TxnErrors -} - -// SessionOp constants give possible operations available in a transaction. -type SessionOp string - -const ( - SessionDelete SessionOp = "delete" -) - -// SessionTxnOp defines a single operation inside a transaction. -type SessionTxnOp struct { - Verb SessionOp - Session Session -} - -// NodeOp constants give possible operations available in a transaction. -type NodeOp string - -const ( - NodeGet NodeOp = "get" - NodeSet NodeOp = "set" - NodeCAS NodeOp = "cas" - NodeDelete NodeOp = "delete" - NodeDeleteCAS NodeOp = "delete-cas" -) - -// NodeTxnOp defines a single operation inside a transaction. -type NodeTxnOp struct { - Verb NodeOp - Node Node -} - -// ServiceOp constants give possible operations available in a transaction. -type ServiceOp string - -const ( - ServiceGet ServiceOp = "get" - ServiceSet ServiceOp = "set" - ServiceCAS ServiceOp = "cas" - ServiceDelete ServiceOp = "delete" - ServiceDeleteCAS ServiceOp = "delete-cas" -) - -// ServiceTxnOp defines a single operation inside a transaction. -type ServiceTxnOp struct { - Verb ServiceOp - Node string - Service AgentService -} - -// CheckOp constants give possible operations available in a transaction. -type CheckOp string - -const ( - CheckGet CheckOp = "get" - CheckSet CheckOp = "set" - CheckCAS CheckOp = "cas" - CheckDelete CheckOp = "delete" - CheckDeleteCAS CheckOp = "delete-cas" -) - -// CheckTxnOp defines a single operation inside a transaction. 
-type CheckTxnOp struct { - Verb CheckOp - Check HealthCheck -} - -// Txn is used to apply multiple Consul operations in a single, atomic transaction. -// -// Note that Go will perform the required base64 encoding on the values -// automatically because the type is a byte slice. Transactions are defined as a -// list of operations to perform, using the different fields in the TxnOp structure -// to define operations. If any operation fails, none of the changes are applied -// to the state store. -// -// Even though this is generally a write operation, we take a QueryOptions input -// and return a QueryMeta output. If the transaction contains only read ops, then -// Consul will fast-path it to a different endpoint internally which supports -// consistency controls, but not blocking. If there are write operations then -// the request will always be routed through raft and any consistency settings -// will be ignored. -// -// Here's an example: -// -// ops := KVTxnOps{ -// &KVTxnOp{ -// Verb: KVLock, -// Key: "test/lock", -// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", -// Value: []byte("hello"), -// }, -// &KVTxnOp{ -// Verb: KVGet, -// Key: "another/key", -// }, -// &CheckTxnOp{ -// Verb: CheckSet, -// HealthCheck: HealthCheck{ -// Node: "foo", -// CheckID: "redis:a", -// Name: "Redis Health Check", -// Status: "passing", -// }, -// } -// } -// ok, response, _, err := kv.Txn(&ops, nil) -// -// If there is a problem making the transaction request then an error will be -// returned. Otherwise, the ok value will be true if the transaction succeeded -// or false if it was rolled back. The response is a structured return value which -// will have the outcome of the transaction. Its Results member will have entries -// for each operation. For KV operations, Deleted keys will have a nil entry in the -// results, and to save space, the Value of each key in the Results will be nil -// unless the operation is a KVGet. 
If the transaction was rolled back, the Errors -// member will have entries referencing the index of the operation that failed -// along with an error message. -func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) { - return t.c.txn(txn, q) -} - -func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) { - r := c.newRequest("PUT", "/v1/txn") - r.setQueryOptions(q) - - r.obj = txn - rtt, resp, err := c.doRequest(r) - if err != nil { - return false, nil, nil, err - } - defer closeResponseBody(resp) - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { - var txnResp TxnResponse - if err := decodeBody(resp, &txnResp); err != nil { - return false, nil, nil, err - } - - return resp.StatusCode == http.StatusOK, &txnResp, qm, nil - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) -} diff --git a/vendor/github.com/hashicorp/consul/api/watch/funcs.go b/vendor/github.com/hashicorp/consul/api/watch/funcs.go deleted file mode 100644 index 0d0f6e100c2..00000000000 --- a/vendor/github.com/hashicorp/consul/api/watch/funcs.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package watch - -import ( - "context" - "fmt" - - consulapi "github.com/hashicorp/consul/api" -) - -// watchFactory is a function that can create a new WatchFunc -// from a parameter configuration -type watchFactory func(params map[string]interface{}) (WatcherFunc, error) - -// watchFuncFactory maps each type to a factory function -var watchFuncFactory map[string]watchFactory - -func init() { - watchFuncFactory = map[string]watchFactory{ - "key": keyWatch, - "keyprefix": keyPrefixWatch, - "services": servicesWatch, - "nodes": nodesWatch, - "service": serviceWatch, - "checks": checksWatch, - "event": eventWatch, - "connect_roots": connectRootsWatch, - "connect_leaf": connectLeafWatch, - "agent_service": agentServiceWatch, - } -} - -// keyWatch is used to return a key watching function -func keyWatch(params map[string]interface{}) (WatcherFunc, error) { - stale := false - if err := assignValueBool(params, "stale", &stale); err != nil { - return nil, err - } - - var key string - if err := assignValue(params, "key", &key); err != nil { - return nil, err - } - if key == "" { - return nil, fmt.Errorf("Must specify a single key to watch") - } - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - kv := p.client.KV() - opts := makeQueryOptionsWithContext(p, stale) - defer p.cancelFunc() - pair, meta, err := kv.Get(key, &opts) - if err != nil { - return nil, nil, err - } - if pair == nil { - return WaitIndexVal(meta.LastIndex), nil, err - } - return WaitIndexVal(meta.LastIndex), pair, err - } - return fn, nil -} - -// keyPrefixWatch is used to return a key prefix watching function -func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) { - stale := false - if err := assignValueBool(params, "stale", &stale); err != nil { - return nil, err - } - - var prefix string - if err := assignValue(params, "prefix", &prefix); err != nil { - return nil, err - } - if prefix == "" { - return nil, fmt.Errorf("Must specify a 
single prefix to watch") - } - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - kv := p.client.KV() - opts := makeQueryOptionsWithContext(p, stale) - defer p.cancelFunc() - pairs, meta, err := kv.List(prefix, &opts) - if err != nil { - return nil, nil, err - } - return WaitIndexVal(meta.LastIndex), pairs, err - } - return fn, nil -} - -// servicesWatch is used to watch the list of available services -func servicesWatch(params map[string]interface{}) (WatcherFunc, error) { - stale := false - filter := "" - if err := assignValueBool(params, "stale", &stale); err != nil { - return nil, err - } - if err := assignValue(params, "filter", &filter); err != nil { - return nil, err - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - catalog := p.client.Catalog() - opts := makeQueryOptionsWithContext(p, stale) - if filter != "" { - opts.Filter = filter - } - defer p.cancelFunc() - services, meta, err := catalog.Services(&opts) - if err != nil { - return nil, nil, err - } - return WaitIndexVal(meta.LastIndex), services, err - } - return fn, nil -} - -// nodesWatch is used to watch the list of available nodes -func nodesWatch(params map[string]interface{}) (WatcherFunc, error) { - stale := false - filter := "" - if err := assignValueBool(params, "stale", &stale); err != nil { - return nil, err - } - if err := assignValue(params, "filter", &filter); err != nil { - return nil, err - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - catalog := p.client.Catalog() - opts := makeQueryOptionsWithContext(p, stale) - if filter != "" { - opts.Filter = filter - } - defer p.cancelFunc() - nodes, meta, err := catalog.Nodes(&opts) - if err != nil { - return nil, nil, err - } - return WaitIndexVal(meta.LastIndex), nodes, err - } - return fn, nil -} - -// serviceWatch is used to watch a specific service for changes -func serviceWatch(params map[string]interface{}) (WatcherFunc, error) { - stale := false - filter := "" - if err := 
assignValueBool(params, "stale", &stale); err != nil { - return nil, err - } - if err := assignValue(params, "filter", &filter); err != nil { - return nil, err - } - - var ( - service string - tags []string - ) - if err := assignValue(params, "service", &service); err != nil { - return nil, err - } - if service == "" { - return nil, fmt.Errorf("Must specify a single service to watch") - } - if err := assignValueStringSlice(params, "tag", &tags); err != nil { - return nil, err - } - - passingOnly := false - if err := assignValueBool(params, "passingonly", &passingOnly); err != nil { - return nil, err - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - health := p.client.Health() - opts := makeQueryOptionsWithContext(p, stale) - if filter != "" { - opts.Filter = filter - } - defer p.cancelFunc() - nodes, meta, err := health.ServiceMultipleTags(service, tags, passingOnly, &opts) - if err != nil { - return nil, nil, err - } - return WaitIndexVal(meta.LastIndex), nodes, err - } - return fn, nil -} - -// checksWatch is used to watch a specific checks in a given state -func checksWatch(params map[string]interface{}) (WatcherFunc, error) { - stale := false - if err := assignValueBool(params, "stale", &stale); err != nil { - return nil, err - } - - var service, state, filter string - if err := assignValue(params, "service", &service); err != nil { - return nil, err - } - if err := assignValue(params, "state", &state); err != nil { - return nil, err - } - if err := assignValue(params, "filter", &filter); err != nil { - return nil, err - } - if service != "" && state != "" { - return nil, fmt.Errorf("Cannot specify service and state") - } - if service == "" && state == "" { - state = "any" - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - health := p.client.Health() - opts := makeQueryOptionsWithContext(p, stale) - defer p.cancelFunc() - var checks []*consulapi.HealthCheck - var meta *consulapi.QueryMeta - var err error - if filter != 
"" { - opts.Filter = filter - } - if state != "" { - checks, meta, err = health.State(state, &opts) - } else { - checks, meta, err = health.Checks(service, &opts) - } - if err != nil { - return nil, nil, err - } - return WaitIndexVal(meta.LastIndex), checks, err - } - return fn, nil -} - -// eventWatch is used to watch for events, optionally filtering on name -func eventWatch(params map[string]interface{}) (WatcherFunc, error) { - // The stale setting doesn't apply to events. - - var name string - if err := assignValue(params, "name", &name); err != nil { - return nil, err - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - event := p.client.Event() - opts := makeQueryOptionsWithContext(p, false) - defer p.cancelFunc() - events, meta, err := event.List(name, &opts) - if err != nil { - return nil, nil, err - } - - // Prune to only the new events - for i := 0; i < len(events); i++ { - if WaitIndexVal(event.IDToIndex(events[i].ID)).Equal(p.lastParamVal) { - events = events[i+1:] - break - } - } - return WaitIndexVal(meta.LastIndex), events, err - } - return fn, nil -} - -// connectRootsWatch is used to watch for changes to Connect Root certificates. -func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { - // We don't support stale since roots are cached locally in the agent. - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - agent := p.client.Agent() - opts := makeQueryOptionsWithContext(p, false) - defer p.cancelFunc() - - roots, meta, err := agent.ConnectCARoots(&opts) - if err != nil { - return nil, nil, err - } - - return WaitIndexVal(meta.LastIndex), roots, err - } - return fn, nil -} - -// connectLeafWatch is used to watch for changes to Connect Leaf certificates -// for given local service id. -func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { - // We don't support stale since certs are cached locally in the agent. 
- - var serviceName string - if err := assignValue(params, "service", &serviceName); err != nil { - return nil, err - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - agent := p.client.Agent() - opts := makeQueryOptionsWithContext(p, false) - defer p.cancelFunc() - - leaf, meta, err := agent.ConnectCALeaf(serviceName, &opts) - if err != nil { - return nil, nil, err - } - - return WaitIndexVal(meta.LastIndex), leaf, err - } - return fn, nil -} - -// agentServiceWatch is used to watch for changes to a single service instance -// on the local agent. Note that this state is agent-local so the watch -// mechanism uses `hash` rather than `index` for deciding whether to block. -func agentServiceWatch(params map[string]interface{}) (WatcherFunc, error) { - // We don't support consistency modes since it's agent local data - - var serviceID string - if err := assignValue(params, "service_id", &serviceID); err != nil { - return nil, err - } - - fn := func(p *Plan) (BlockingParamVal, interface{}, error) { - agent := p.client.Agent() - opts := makeQueryOptionsWithContext(p, false) - defer p.cancelFunc() - - svc, _, err := agent.Service(serviceID, &opts) - if err != nil { - return nil, nil, err - } - - // Return string ContentHash since we don't have Raft indexes to block on. 
- return WaitHashVal(svc.ContentHash), svc, err - } - return fn, nil -} - -func makeQueryOptionsWithContext(p *Plan, stale bool) consulapi.QueryOptions { - ctx, cancel := context.WithCancel(context.Background()) - p.setCancelFunc(cancel) - opts := consulapi.QueryOptions{AllowStale: stale} - switch param := p.lastParamVal.(type) { - case WaitIndexVal: - opts.WaitIndex = uint64(param) - case WaitHashVal: - opts.WaitHash = string(param) - } - return *opts.WithContext(ctx) -} diff --git a/vendor/github.com/hashicorp/consul/api/watch/plan.go b/vendor/github.com/hashicorp/consul/api/watch/plan.go deleted file mode 100644 index a3588ff184a..00000000000 --- a/vendor/github.com/hashicorp/consul/api/watch/plan.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package watch - -import ( - "context" - "fmt" - "io" - "log" - "reflect" - "time" - - "github.com/hashicorp/go-hclog" - - consulapi "github.com/hashicorp/consul/api" -) - -const ( - // retryInterval is the base retry value - retryInterval = 5 * time.Second - - // maximum back off time, this is to prevent - // exponential runaway - maxBackoffTime = 180 * time.Second - - // Name used with hclog Logger. We do not add this to the logging package - // because we do not want to pull in the root consul module. 
- watchLoggerName = "watch" -) - -func (p *Plan) Run(address string) error { - return p.RunWithConfig(address, nil) -} - -// Run is used to run a watch plan -func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error { - logger := p.Logger - if logger == nil { - logger = newWatchLogger(p.LogOutput) - } - - // Setup the client - p.address = address - if conf == nil { - conf = consulapi.DefaultConfigWithLogger(logger) - } - conf.Address = address - conf.Datacenter = p.Datacenter - conf.Token = p.Token - client, err := consulapi.NewClient(conf) - if err != nil { - return fmt.Errorf("Failed to connect to agent: %v", err) - } - - return p.RunWithClientAndHclog(client, logger) -} - -// RunWithClientAndLogger runs a watch plan using an external client and -// hclog.Logger instance. Using this, the plan's Datacenter, Token and LogOutput -// fields are ignored and the passed client is expected to be configured as -// needed. -func (p *Plan) RunWithClientAndHclog(client *consulapi.Client, logger hclog.Logger) error { - var watchLogger hclog.Logger - if logger == nil { - watchLogger = newWatchLogger(nil) - } else { - watchLogger = logger.Named(watchLoggerName) - } - - p.client = client - - // Loop until we are canceled - failures := 0 -OUTER: - for !p.shouldStop() { - // Invoke the handler - blockParamVal, result, err := p.Watcher(p) - - // Check if we should terminate since the function - // could have blocked for a while - if p.shouldStop() { - break - } - - // Handle an error in the watch function - if err != nil { - // Perform an exponential backoff - failures++ - if blockParamVal == nil { - p.lastParamVal = nil - } else { - p.lastParamVal = blockParamVal.Next(p.lastParamVal) - } - retry := retryInterval * time.Duration(failures*failures) - if retry > maxBackoffTime { - retry = maxBackoffTime - } - watchLogger.Error("Watch errored", "type", p.Type, "error", err, "retry", retry) - select { - case <-time.After(retry): - continue OUTER - case <-p.stopCh: - 
return nil - } - } - - // Clear the failures - failures = 0 - - // If the index is unchanged do nothing - if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) { - continue - } - - // Update the index, look for change - oldParamVal := p.lastParamVal - p.lastParamVal = blockParamVal.Next(oldParamVal) - if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) { - continue - } - - // Handle the updated result - p.lastResult = result - // If a hybrid handler exists use that - if p.HybridHandler != nil { - p.HybridHandler(blockParamVal, result) - } else if p.Handler != nil { - idx, ok := blockParamVal.(WaitIndexVal) - if !ok { - watchLogger.Error("Handler only supports index-based " + - " watches but non index-based watch run. Skipping Handler.") - } - p.Handler(uint64(idx), result) - } - } - return nil -} - -// Deprecated: Use RunwithClientAndHclog -func (p *Plan) RunWithClientAndLogger(client *consulapi.Client, logger *log.Logger) error { - - p.client = client - - // Loop until we are canceled - failures := 0 -OUTER: - for !p.shouldStop() { - // Invoke the handler - blockParamVal, result, err := p.Watcher(p) - - // Check if we should terminate since the function - // could have blocked for a while - if p.shouldStop() { - break - } - - // Handle an error in the watch function - if err != nil { - // Perform an exponential backoff - failures++ - if blockParamVal == nil { - p.lastParamVal = nil - } else { - p.lastParamVal = blockParamVal.Next(p.lastParamVal) - } - retry := retryInterval * time.Duration(failures*failures) - if retry > maxBackoffTime { - retry = maxBackoffTime - } - logger.Printf("[ERR] consul.watch: Watch (type: %s) errored: %v, retry in %v", - p.Type, err, retry) - select { - case <-time.After(retry): - continue OUTER - case <-p.stopCh: - return nil - } - } - - // Clear the failures - failures = 0 - - // If the index is unchanged do nothing - if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) { - continue - } - - // Update 
the index, look for change - oldParamVal := p.lastParamVal - p.lastParamVal = blockParamVal.Next(oldParamVal) - if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) { - continue - } - - // Handle the updated result - p.lastResult = result - // If a hybrid handler exists use that - if p.HybridHandler != nil { - p.HybridHandler(blockParamVal, result) - } else if p.Handler != nil { - idx, ok := blockParamVal.(WaitIndexVal) - if !ok { - logger.Printf("[ERR] consul.watch: Handler only supports index-based " + - " watches but non index-based watch run. Skipping Handler.") - } - p.Handler(uint64(idx), result) - } - } - return nil -} - -// Stop is used to stop running the watch plan -func (p *Plan) Stop() { - p.stopLock.Lock() - defer p.stopLock.Unlock() - if p.stop { - return - } - p.stop = true - if p.cancelFunc != nil { - p.cancelFunc() - } - close(p.stopCh) -} - -func (p *Plan) shouldStop() bool { - select { - case <-p.stopCh: - return true - default: - return false - } -} - -func (p *Plan) setCancelFunc(cancel context.CancelFunc) { - p.stopLock.Lock() - defer p.stopLock.Unlock() - if p.shouldStop() { - // The watch is stopped and execute the new cancel func to stop watchFactory - cancel() - return - } - p.cancelFunc = cancel -} - -func (p *Plan) IsStopped() bool { - p.stopLock.Lock() - defer p.stopLock.Unlock() - return p.stop -} - -func newWatchLogger(output io.Writer) hclog.Logger { - return hclog.New(&hclog.LoggerOptions{ - Name: watchLoggerName, - Output: output, - }) -} diff --git a/vendor/github.com/hashicorp/consul/api/watch/watch.go b/vendor/github.com/hashicorp/consul/api/watch/watch.go deleted file mode 100644 index ea00f8ef0cb..00000000000 --- a/vendor/github.com/hashicorp/consul/api/watch/watch.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package watch - -import ( - "context" - "fmt" - "io" - "sync" - "time" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-hclog" - "github.com/mitchellh/mapstructure" -) - -const DefaultTimeout = 10 * time.Second - -// Plan is the parsed version of a watch specification. A watch provides -// the details of a query, which generates a view into the Consul data store. -// This view is watched for changes and a handler is invoked to take any -// appropriate actions. -type Plan struct { - Datacenter string - Token string - Type string - HandlerType string - Exempt map[string]interface{} - - Watcher WatcherFunc - // Handler is kept for backward compatibility but only supports watches based - // on index param. To support hash based watches, set HybridHandler instead. - Handler HandlerFunc - HybridHandler HybridHandlerFunc - - Logger hclog.Logger - // Deprecated: use Logger - LogOutput io.Writer - - address string - client *consulapi.Client - lastParamVal BlockingParamVal - lastResult interface{} - - stop bool - stopCh chan struct{} - stopLock sync.Mutex - cancelFunc context.CancelFunc -} - -type HttpHandlerConfig struct { - Path string `mapstructure:"path"` - Method string `mapstructure:"method"` - Timeout time.Duration `mapstructure:"-"` - TimeoutRaw string `mapstructure:"timeout"` - Header map[string][]string `mapstructure:"header"` - TLSSkipVerify bool `mapstructure:"tls_skip_verify"` -} - -// BlockingParamVal is an interface representing the common operations needed for -// different styles of blocking. It's used to abstract the core watch plan from -// whether we are performing index-based or hash-based blocking. -type BlockingParamVal interface { - // Equal returns whether the other param value should be considered equal - // (i.e. representing no change in the watched resource). Equal must not panic - // if other is nil. 
- Equal(other BlockingParamVal) bool - - // Next is called when deciding which value to use on the next blocking call. - // It assumes the BlockingParamVal value it is called on is the most recent one - // returned and passes the previous one which may be nil as context. This - // allows types to customize logic around ordering without assuming there is - // an order. For example WaitIndexVal can check that the index didn't go - // backwards and if it did then reset to 0. Most other cases should just - // return themselves (the most recent value) to be used in the next request. - Next(previous BlockingParamVal) BlockingParamVal -} - -// WaitIndexVal is a type representing a Consul index that implements -// BlockingParamVal. -type WaitIndexVal uint64 - -// Equal implements BlockingParamVal -func (idx WaitIndexVal) Equal(other BlockingParamVal) bool { - if otherIdx, ok := other.(WaitIndexVal); ok { - return idx == otherIdx - } - return false -} - -// Next implements BlockingParamVal -func (idx WaitIndexVal) Next(previous BlockingParamVal) BlockingParamVal { - if previous == nil { - return idx - } - prevIdx, ok := previous.(WaitIndexVal) - if ok && prevIdx == idx { - // This value is the same as the previous index, reset - return WaitIndexVal(0) - } - return idx -} - -// WaitHashVal is a type representing a Consul content hash that implements -// BlockingParamVal. -type WaitHashVal string - -// Equal implements BlockingParamVal -func (h WaitHashVal) Equal(other BlockingParamVal) bool { - if otherHash, ok := other.(WaitHashVal); ok { - return h == otherHash - } - return false -} - -// Next implements BlockingParamVal -func (h WaitHashVal) Next(previous BlockingParamVal) BlockingParamVal { - return h -} - -// WatcherFunc is used to watch for a diff. -type WatcherFunc func(*Plan) (BlockingParamVal, interface{}, error) - -// HandlerFunc is used to handle new data. 
It only works for index-based watches -// (which is almost all end points currently) and is kept for backwards -// compatibility until more places can make use of hash-based watches too. -type HandlerFunc func(uint64, interface{}) - -// HybridHandlerFunc is used to handle new data. It can support either -// index-based or hash-based watches via the BlockingParamVal. -type HybridHandlerFunc func(BlockingParamVal, interface{}) - -// Parse takes a watch query and compiles it into a WatchPlan or an error -func Parse(params map[string]interface{}) (*Plan, error) { - return ParseExempt(params, nil) -} - -// ParseExempt takes a watch query and compiles it into a WatchPlan or an error -// Any exempt parameters are stored in the Exempt map -func ParseExempt(params map[string]interface{}, exempt []string) (*Plan, error) { - plan := &Plan{ - stopCh: make(chan struct{}), - Exempt: make(map[string]interface{}), - } - - // Parse the generic parameters - if err := assignValue(params, "datacenter", &plan.Datacenter); err != nil { - return nil, err - } - if err := assignValue(params, "token", &plan.Token); err != nil { - return nil, err - } - if err := assignValue(params, "type", &plan.Type); err != nil { - return nil, err - } - // Ensure there is a watch type - if plan.Type == "" { - return nil, fmt.Errorf("Watch type must be specified") - } - - // Get the specific handler - if err := assignValue(params, "handler_type", &plan.HandlerType); err != nil { - return nil, err - } - switch plan.HandlerType { - case "http": - if _, ok := params["http_handler_config"]; !ok { - return nil, fmt.Errorf("Handler type 'http' requires 'http_handler_config' to be set") - } - config, err := parseHttpHandlerConfig(params["http_handler_config"]) - if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("Failed to parse 'http_handler_config': %v", err)) - } - plan.Exempt["http_handler_config"] = config - delete(params, "http_handler_config") - - case "script": - // Let the caller check for configuration 
in exempt parameters - } - - // Look for a factory function - factory := watchFuncFactory[plan.Type] - if factory == nil { - return nil, fmt.Errorf("Unsupported watch type: %s", plan.Type) - } - - // Get the watch func - fn, err := factory(params) - if err != nil { - return nil, err - } - plan.Watcher = fn - - // Remove the exempt parameters - if len(exempt) > 0 { - for _, ex := range exempt { - val, ok := params[ex] - if ok { - plan.Exempt[ex] = val - delete(params, ex) - } - } - } - - // Ensure all parameters are consumed - if len(params) != 0 { - var bad []string - for key := range params { - bad = append(bad, key) - } - return nil, fmt.Errorf("Invalid parameters: %v", bad) - } - return plan, nil -} - -// assignValue is used to extract a value ensuring it is a string -func assignValue(params map[string]interface{}, name string, out *string) error { - if raw, ok := params[name]; ok { - val, ok := raw.(string) - if !ok { - return fmt.Errorf("Expecting %s to be a string", name) - } - *out = val - delete(params, name) - } - return nil -} - -// assignValueBool is used to extract a value ensuring it is a bool -func assignValueBool(params map[string]interface{}, name string, out *bool) error { - if raw, ok := params[name]; ok { - val, ok := raw.(bool) - if !ok { - return fmt.Errorf("Expecting %s to be a boolean", name) - } - *out = val - delete(params, name) - } - return nil -} - -// assignValueStringSlice is used to extract a value ensuring it is either a string or a slice of strings -func assignValueStringSlice(params map[string]interface{}, name string, out *[]string) error { - if raw, ok := params[name]; ok { - var tmp []string - switch raw.(type) { - case string: - tmp = make([]string, 1, 1) - tmp[0] = raw.(string) - case []string: - l := len(raw.([]string)) - tmp = make([]string, l, l) - copy(tmp, raw.([]string)) - case []interface{}: - l := len(raw.([]interface{})) - tmp = make([]string, l, l) - for i, v := range raw.([]interface{}) { - if s, ok := v.(string); 
ok { - tmp[i] = s - } else { - return fmt.Errorf("Index %d of %s expected to be string", i, name) - } - } - default: - return fmt.Errorf("Expecting %s to be a string or []string", name) - } - *out = tmp - delete(params, name) - } - return nil -} - -// Parse the 'http_handler_config' parameters -func parseHttpHandlerConfig(configParams interface{}) (*HttpHandlerConfig, error) { - var config HttpHandlerConfig - if err := mapstructure.Decode(configParams, &config); err != nil { - return nil, err - } - - if config.Path == "" { - return nil, fmt.Errorf("Requires 'path' to be set") - } - if config.Method == "" { - config.Method = "POST" - } - if config.TimeoutRaw == "" { - config.Timeout = DefaultTimeout - } else if timeout, err := time.ParseDuration(config.TimeoutRaw); err != nil { - return nil, fmt.Errorf(fmt.Sprintf("Failed to parse timeout: %v", err)) - } else { - config.Timeout = timeout - } - - return &config, nil -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. 
- - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313fc8..00000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. 
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index fe28d15b6f9..00000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,58 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "runtime" - "time" -) - -// DefaultTransport returns a new http.Transport with similar default values to -// http.DefaultTransport, but with idle connections and keepalives disabled. -func DefaultTransport() *http.Transport { - transport := DefaultPooledTransport() - transport.DisableKeepAlives = true - transport.MaxIdleConnsPerHost = -1 - return transport -} - -// DefaultPooledTransport returns a new http.Transport with similar default -// values to http.DefaultTransport. Do not use this for transient transports as -// it can leak file descriptors over time. Only use this for transports that -// will be re-used for the same host(s). -func DefaultPooledTransport() *http.Transport { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - ForceAttemptHTTP2: true, - MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, - } - return transport -} - -// DefaultClient returns a new http.Client with similar default values to -// http.Client, but with a non-shared Transport, idle connections disabled, and -// keepalives disabled. -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} - -// DefaultPooledClient returns a new http.Client with similar default values to -// http.Client, but with a shared Transport. Do not use this function for -// transient clients as it can leak file descriptors over time. 
Only use this -// for clients that will be re-used for the same host(s). -func DefaultPooledClient() *http.Client { - return &http.Client{ - Transport: DefaultPooledTransport(), - } -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go deleted file mode 100644 index 05841092a7b..00000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package cleanhttp offers convenience utilities for acquiring "clean" -// http.Transport and http.Client structs. -// -// Values set on http.DefaultClient and http.DefaultTransport affect all -// callers. This can have detrimental effects, esepcially in TLS contexts, -// where client or root certificates set to talk to multiple endpoints can end -// up displacing each other, leading to hard-to-debug issues. This package -// provides non-shared http.Client and http.Transport structs to ensure that -// the configuration will not be overwritten by other parts of the application -// or dependencies. -// -// The DefaultClient and DefaultTransport functions disable idle connections -// and keepalives. Without ensuring that idle connections are closed before -// garbage collection, short-term clients/transports can leak file descriptors, -// eventually leading to "too many open files" errors. If you will be -// connecting to the same hosts repeatedly from the same client, you can use -// DefaultPooledClient to receive a client that has connection pooling -// semantics similar to http.DefaultClient. 
-// -package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go deleted file mode 100644 index 3c845dc0dc6..00000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go +++ /dev/null @@ -1,48 +0,0 @@ -package cleanhttp - -import ( - "net/http" - "strings" - "unicode" -) - -// HandlerInput provides input options to cleanhttp's handlers -type HandlerInput struct { - ErrStatus int -} - -// PrintablePathCheckHandler is a middleware that ensures the request path -// contains only printable runes. -func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { - // Nil-check on input to make it optional - if input == nil { - input = &HandlerInput{ - ErrStatus: http.StatusBadRequest, - } - } - - // Default to http.StatusBadRequest on error - if input.ErrStatus == 0 { - input.ErrStatus = http.StatusBadRequest - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r != nil { - // Check URL path for non-printable characters - idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { - return !unicode.IsPrint(c) - }) - - if idx != -1 { - w.WriteHeader(input.ErrStatus) - return - } - - if next != nil { - next.ServeHTTP(w, r) - } - } - - return - }) -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore deleted file mode 100644 index daf913b1b34..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md 
b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md deleted file mode 100644 index 86c6d03fbaa..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md +++ /dev/null @@ -1,23 +0,0 @@ -# UNRELEASED - -# 1.3.0 (September 17th, 2020) - -FEATURES - -* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] - -# 1.2.0 (March 18th, 2020) - -FEATURES - -* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] - -# 1.1.0 (May 22nd, 2019) - -FEATURES - -* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] - -# 1.0.0 (August 30th, 2018) - -* go mod adopted diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. 
- - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md deleted file mode 100644 index aca15a64212..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/README.md +++ /dev/null @@ -1,66 +0,0 @@ -go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) -========= - -Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -A tree supports using a transaction to batch multiple updates (insert, delete) -in a more efficient manner than performing each operation one at a time. - -For a mutable variant, see [go-radix](https://github.com/armon/go-radix). - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). - -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := iradix.New() -r, _, _ = r.Insert([]byte("foo"), 1) -r, _, _ = r.Insert([]byte("bar"), 2) -r, _, _ = r.Insert([]byte("foobar"), 2) - -// Find the longest prefix match -m, _, _ := r.Root().LongestPrefix([]byte("foozip")) -if string(m) != "foo" { - panic("should be foo") -} -``` - -Here is an example of performing a range scan of the keys. 
- -```go -// Create a tree -r := iradix.New() -r, _, _ = r.Insert([]byte("001"), 1) -r, _, _ = r.Insert([]byte("002"), 2) -r, _, _ = r.Insert([]byte("005"), 5) -r, _, _ = r.Insert([]byte("010"), 10) -r, _, _ = r.Insert([]byte("100"), 10) - -// Range scan over the keys that sort lexicographically between [003, 050) -it := r.Root().Iterator() -it.SeekLowerBound([]byte("003")) -for key, _, ok := it.Next(); ok; key, _, ok = it.Next() { - if key >= "050" { - break - } - fmt.Println(key) -} -// Output: -// 005 -// 010 -``` - diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go deleted file mode 100644 index a63674775f2..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/edges.go +++ /dev/null @@ -1,21 +0,0 @@ -package iradix - -import "sort" - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go deleted file mode 100644 index 168bda76dfb..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go +++ /dev/null @@ -1,676 +0,0 @@ -package iradix - -import ( - "bytes" - "strings" - - "github.com/hashicorp/golang-lru/simplelru" -) - -const ( - // defaultModifiedCache is the default size of the modified node - // cache used per transaction. This is used to cache the updates - // to the nodes near the root, while the leaves do not need to be - // cached. This is important for very large transactions to prevent - // the modified cache from growing to be enormous. This is also used - // to set the max size of the mutation notify maps since those should - // also be bounded in a similar way. 
- defaultModifiedCache = 8192 -) - -// Tree implements an immutable radix tree. This can be treated as a -// Dictionary abstract data type. The main advantage over a standard -// hash map is prefix-based lookups and ordered iteration. The immutability -// means that it is safe to concurrently read from a Tree without any -// coordination. -type Tree struct { - root *Node - size int -} - -// New returns an empty Tree -func New() *Tree { - t := &Tree{ - root: &Node{ - mutateCh: make(chan struct{}), - }, - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// Txn is a transaction on the tree. This transaction is applied -// atomically and returns a new tree when committed. A transaction -// is not thread safe, and should only be used by a single goroutine. -type Txn struct { - // root is the modified root for the transaction. - root *Node - - // snap is a snapshot of the root node for use if we have to run the - // slow notify algorithm. - snap *Node - - // size tracks the size of the tree as it is modified during the - // transaction. - size int - - // writable is a cache of writable nodes that have been created during - // the course of the transaction. This allows us to re-use the same - // nodes for further writes and avoid unnecessary copies of nodes that - // have never been exposed outside the transaction. This will only hold - // up to defaultModifiedCache number of entries. - writable *simplelru.LRU - - // trackChannels is used to hold channels that need to be notified to - // signal mutation of the tree. This will only hold up to - // defaultModifiedCache number of entries, after which we will set the - // trackOverflow flag, which will cause us to use a more expensive - // algorithm to perform the notifications. Mutation tracking is only - // performed if trackMutate is true. 
- trackChannels map[chan struct{}]struct{} - trackOverflow bool - trackMutate bool -} - -// Txn starts a new transaction that can be used to mutate the tree -func (t *Tree) Txn() *Txn { - txn := &Txn{ - root: t.root, - snap: t.root, - size: t.size, - } - return txn -} - -// Clone makes an independent copy of the transaction. The new transaction -// does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently however each transaction may only be mutated in a single thread. -func (t *Txn) Clone() *Txn { - // reset the writable node cache to avoid leaking future writes into the clone - t.writable = nil - - txn := &Txn{ - root: t.root, - snap: t.snap, - size: t.size, - } - return txn -} - -// TrackMutate can be used to toggle if mutations are tracked. If this is enabled -// then notifications will be issued for affected internal nodes and leaves when -// the transaction is committed. -func (t *Txn) TrackMutate(track bool) { - t.trackMutate = track -} - -// trackChannel safely attempts to track the given mutation channel, setting the -// overflow flag if we can no longer track any more. This limits the amount of -// state that will accumulate during a transaction and we have a slower algorithm -// to switch to if we overflow. -func (t *Txn) trackChannel(ch chan struct{}) { - // In overflow, make sure we don't store any more objects. - if t.trackOverflow { - return - } - - // If this would overflow the state we reject it and set the flag (since - // we aren't tracking everything that's required any longer). - if len(t.trackChannels) >= defaultModifiedCache { - // Mark that we are in the overflow state - t.trackOverflow = true - - // Clear the map so that the channels can be garbage collected. 
It is - // safe to do this since we have already overflowed and will be using - // the slow notify algorithm. - t.trackChannels = nil - return - } - - // Create the map on the fly when we need it. - if t.trackChannels == nil { - t.trackChannels = make(map[chan struct{}]struct{}) - } - - // Otherwise we are good to track it. - t.trackChannels[ch] = struct{}{} -} - -// writeNode returns a node to be modified, if the current node has already been -// modified during the course of the transaction, it is used in-place. Set -// forLeafUpdate to true if you are getting a write node to update the leaf, -// which will set leaf mutation tracking appropriately as well. -func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { - // Ensure the writable set exists. - if t.writable == nil { - lru, err := simplelru.NewLRU(defaultModifiedCache, nil) - if err != nil { - panic(err) - } - t.writable = lru - } - - // If this node has already been modified, we can continue to use it - // during this transaction. We know that we don't need to track it for - // a node update since the node is writable, but if this is for a leaf - // update we track it, in case the initial write to this node didn't - // update the leaf. - if _, ok := t.writable.Get(n); ok { - if t.trackMutate && forLeafUpdate && n.leaf != nil { - t.trackChannel(n.leaf.mutateCh) - } - return n - } - - // Mark this node as being mutated. - if t.trackMutate { - t.trackChannel(n.mutateCh) - } - - // Mark its leaf as being mutated, if appropriate. - if t.trackMutate && forLeafUpdate && n.leaf != nil { - t.trackChannel(n.leaf.mutateCh) - } - - // Copy the existing node. If you have set forLeafUpdate it will be - // safe to replace this leaf with another after you get your node for - // writing. You MUST replace it, because the channel associated with - // this leaf will be closed when this transaction is committed. 
- nc := &Node{ - mutateCh: make(chan struct{}), - leaf: n.leaf, - } - if n.prefix != nil { - nc.prefix = make([]byte, len(n.prefix)) - copy(nc.prefix, n.prefix) - } - if len(n.edges) != 0 { - nc.edges = make([]edge, len(n.edges)) - copy(nc.edges, n.edges) - } - - // Mark this node as writable. - t.writable.Add(nc, nil) - return nc -} - -// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction -// Returns the size of the subtree visited -func (t *Txn) trackChannelsAndCount(n *Node) int { - // Count only leaf nodes - leaves := 0 - if n.leaf != nil { - leaves = 1 - } - // Mark this node as being mutated. - if t.trackMutate { - t.trackChannel(n.mutateCh) - } - - // Mark its leaf as being mutated, if appropriate. - if t.trackMutate && n.leaf != nil { - t.trackChannel(n.leaf.mutateCh) - } - - // Recurse on the children - for _, e := range n.edges { - leaves += t.trackChannelsAndCount(e.node) - } - return leaves -} - -// mergeChild is called to collapse the given node with its child. This is only -// called when the given node is not a leaf and has a single edge. -func (t *Txn) mergeChild(n *Node) { - // Mark the child node as being mutated since we are about to abandon - // it. We don't need to mark the leaf since we are retaining it if it - // is there. - e := n.edges[0] - child := e.node - if t.trackMutate { - t.trackChannel(child.mutateCh) - } - - // Merge the nodes. 
- n.prefix = concat(n.prefix, child.prefix) - n.leaf = child.leaf - if len(child.edges) != 0 { - n.edges = make([]edge, len(child.edges)) - copy(n.edges, child.edges) - } else { - n.edges = nil - } -} - -// insert does a recursive insertion -func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { - // Handle key exhaustion - if len(search) == 0 { - var oldVal interface{} - didUpdate := false - if n.isLeaf() { - oldVal = n.leaf.val - didUpdate = true - } - - nc := t.writeNode(n, true) - nc.leaf = &leafNode{ - mutateCh: make(chan struct{}), - key: k, - val: v, - } - return nc, oldVal, didUpdate - } - - // Look for the edge - idx, child := n.getEdge(search[0]) - - // No edge, create one - if child == nil { - e := edge{ - label: search[0], - node: &Node{ - mutateCh: make(chan struct{}), - leaf: &leafNode{ - mutateCh: make(chan struct{}), - key: k, - val: v, - }, - prefix: search, - }, - } - nc := t.writeNode(n, false) - nc.addEdge(e) - return nc, nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, child.prefix) - if commonPrefix == len(child.prefix) { - search = search[commonPrefix:] - newChild, oldVal, didUpdate := t.insert(child, k, search, v) - if newChild != nil { - nc := t.writeNode(n, false) - nc.edges[idx].node = newChild - return nc, oldVal, didUpdate - } - return nil, oldVal, didUpdate - } - - // Split the node - nc := t.writeNode(n, false) - splitNode := &Node{ - mutateCh: make(chan struct{}), - prefix: search[:commonPrefix], - } - nc.replaceEdge(edge{ - label: search[0], - node: splitNode, - }) - - // Restore the existing child node - modChild := t.writeNode(child, false) - splitNode.addEdge(edge{ - label: modChild.prefix[commonPrefix], - node: modChild, - }) - modChild.prefix = modChild.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - mutateCh: make(chan struct{}), - key: k, - val: v, - } - - // If the new key is a subset, add to to 
this node - search = search[commonPrefix:] - if len(search) == 0 { - splitNode.leaf = leaf - return nc, nil, false - } - - // Create a new edge for the node - splitNode.addEdge(edge{ - label: search[0], - node: &Node{ - mutateCh: make(chan struct{}), - leaf: leaf, - prefix: search, - }, - }) - return nc, nil, false -} - -// delete does a recursive deletion -func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { - // Check for key exhaustion - if len(search) == 0 { - if !n.isLeaf() { - return nil, nil - } - // Copy the pointer in case we are in a transaction that already - // modified this node since the node will be reused. Any changes - // made to the node will not affect returning the original leaf - // value. - oldLeaf := n.leaf - - // Remove the leaf node - nc := t.writeNode(n, true) - nc.leaf = nil - - // Check if this node should be merged - if n != t.root && len(nc.edges) == 1 { - t.mergeChild(nc) - } - return nc, oldLeaf - } - - // Look for an edge - label := search[0] - idx, child := n.getEdge(label) - if child == nil || !bytes.HasPrefix(search, child.prefix) { - return nil, nil - } - - // Consume the search prefix - search = search[len(child.prefix):] - newChild, leaf := t.delete(n, child, search) - if newChild == nil { - return nil, nil - } - - // Copy this node. WATCH OUT - it's safe to pass "false" here because we - // will only ADD a leaf via nc.mergeChild() if there isn't one due to - // the !nc.isLeaf() check in the logic just below. This is pretty subtle, - // so be careful if you change any of the logic here. 
- nc := t.writeNode(n, false) - - // Delete the edge if the node has no edges - if newChild.leaf == nil && len(newChild.edges) == 0 { - nc.delEdge(label) - if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { - t.mergeChild(nc) - } - } else { - nc.edges[idx].node = newChild - } - return nc, leaf -} - -// delete does a recursive deletion -func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { - // Check for key exhaustion - if len(search) == 0 { - nc := t.writeNode(n, true) - if n.isLeaf() { - nc.leaf = nil - } - nc.edges = nil - return nc, t.trackChannelsAndCount(n) - } - - // Look for an edge - label := search[0] - idx, child := n.getEdge(label) - // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix - // Need to do both so that we can delete prefixes that don't correspond to any node in the tree - if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { - return nil, 0 - } - - // Consume the search prefix - if len(child.prefix) > len(search) { - search = []byte("") - } else { - search = search[len(child.prefix):] - } - newChild, numDeletions := t.deletePrefix(n, child, search) - if newChild == nil { - return nil, 0 - } - // Copy this node. WATCH OUT - it's safe to pass "false" here because we - // will only ADD a leaf via nc.mergeChild() if there isn't one due to - // the !nc.isLeaf() check in the logic just below. This is pretty subtle, - // so be careful if you change any of the logic here. - - nc := t.writeNode(n, false) - - // Delete the edge if the node has no edges - if newChild.leaf == nil && len(newChild.edges) == 0 { - nc.delEdge(label) - if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { - t.mergeChild(nc) - } - } else { - nc.edges[idx].node = newChild - } - return nc, numDeletions -} - -// Insert is used to add or update a given key. 
The return provides -// the previous value and a bool indicating if any was set. -func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { - newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) - if newRoot != nil { - t.root = newRoot - } - if !didUpdate { - t.size++ - } - return oldVal, didUpdate -} - -// Delete is used to delete a given key. Returns the old value if any, -// and a bool indicating if the key was set. -func (t *Txn) Delete(k []byte) (interface{}, bool) { - newRoot, leaf := t.delete(nil, t.root, k) - if newRoot != nil { - t.root = newRoot - } - if leaf != nil { - t.size-- - return leaf.val, true - } - return nil, false -} - -// DeletePrefix is used to delete an entire subtree that matches the prefix -// This will delete all nodes under that prefix -func (t *Txn) DeletePrefix(prefix []byte) bool { - newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) - if newRoot != nil { - t.root = newRoot - t.size = t.size - numDeletions - return true - } - return false - -} - -// Root returns the current root of the radix tree within this -// transaction. The root is not safe across insert and delete operations, -// but can be used to read the current state during a transaction. -func (t *Txn) Root() *Node { - return t.root -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Txn) Get(k []byte) (interface{}, bool) { - return t.root.Get(k) -} - -// GetWatch is used to lookup a specific key, returning -// the watch channel, value and if it was found -func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { - return t.root.GetWatch(k) -} - -// Commit is used to finalize the transaction and return a new tree. If mutation -// tracking is turned on then notifications will also be issued. 
-func (t *Txn) Commit() *Tree { - nt := t.CommitOnly() - if t.trackMutate { - t.Notify() - } - return nt -} - -// CommitOnly is used to finalize the transaction and return a new tree, but -// does not issue any notifications until Notify is called. -func (t *Txn) CommitOnly() *Tree { - nt := &Tree{t.root, t.size} - t.writable = nil - return nt -} - -// slowNotify does a complete comparison of the before and after trees in order -// to trigger notifications. This doesn't require any additional state but it -// is very expensive to compute. -func (t *Txn) slowNotify() { - snapIter := t.snap.rawIterator() - rootIter := t.root.rawIterator() - for snapIter.Front() != nil || rootIter.Front() != nil { - // If we've exhausted the nodes in the old snapshot, we know - // there's nothing remaining to notify. - if snapIter.Front() == nil { - return - } - snapElem := snapIter.Front() - - // If we've exhausted the nodes in the new root, we know we need - // to invalidate everything that remains in the old snapshot. We - // know from the loop condition there's something in the old - // snapshot. - if rootIter.Front() == nil { - close(snapElem.mutateCh) - if snapElem.isLeaf() { - close(snapElem.leaf.mutateCh) - } - snapIter.Next() - continue - } - - // Do one string compare so we can check the various conditions - // below without repeating the compare. - cmp := strings.Compare(snapIter.Path(), rootIter.Path()) - - // If the snapshot is behind the root, then we must have deleted - // this node during the transaction. - if cmp < 0 { - close(snapElem.mutateCh) - if snapElem.isLeaf() { - close(snapElem.leaf.mutateCh) - } - snapIter.Next() - continue - } - - // If the snapshot is ahead of the root, then we must have added - // this node during the transaction. - if cmp > 0 { - rootIter.Next() - continue - } - - // If we have the same path, then we need to see if we mutated a - // node and possibly the leaf. 
- rootElem := rootIter.Front() - if snapElem != rootElem { - close(snapElem.mutateCh) - if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { - close(snapElem.leaf.mutateCh) - } - } - snapIter.Next() - rootIter.Next() - } -} - -// Notify is used along with TrackMutate to trigger notifications. This must -// only be done once a transaction is committed via CommitOnly, and it is called -// automatically by Commit. -func (t *Txn) Notify() { - if !t.trackMutate { - return - } - - // If we've overflowed the tracking state we can't use it in any way and - // need to do a full tree compare. - if t.trackOverflow { - t.slowNotify() - } else { - for ch := range t.trackChannels { - close(ch) - } - } - - // Clean up the tracking state so that a re-notify is safe (will trigger - // the else clause above which will be a no-op). - t.trackChannels = nil - t.trackOverflow = false -} - -// Insert is used to add or update a given key. The return provides -// the new tree, previous value and a bool indicating if any was set. -func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { - txn := t.Txn() - old, ok := txn.Insert(k, v) - return txn.Commit(), old, ok -} - -// Delete is used to delete a given key. Returns the new tree, -// old value if any, and a bool indicating if the key was set. -func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { - txn := t.Txn() - old, ok := txn.Delete(k) - return txn.Commit(), old, ok -} - -// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, -// and a bool indicating if the prefix matched any nodes -func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { - txn := t.Txn() - ok := txn.DeletePrefix(k) - return txn.Commit(), ok -} - -// Root returns the root node of the tree which can be used for richer -// query operations. 
-func (t *Tree) Root() *Node { - return t.root -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(k []byte) (interface{}, bool) { - return t.root.Get(k) -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 []byte) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// concat two byte slices, returning a third new copy -func concat(a, b []byte) []byte { - c := make([]byte, len(a)+len(b)) - copy(c, a) - copy(c[len(a):], b) - return c -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go deleted file mode 100644 index f17d0a644f4..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go +++ /dev/null @@ -1,205 +0,0 @@ -package iradix - -import ( - "bytes" -) - -// Iterator is used to iterate over a set of nodes -// in pre-order -type Iterator struct { - node *Node - stack []edges -} - -// SeekPrefixWatch is used to seek the iterator to a given prefix -// and returns the watch channel of the finest granularity -func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { - // Wipe the stack - i.stack = nil - n := i.node - watch = n.mutateCh - search := prefix - for { - // Check for key exhaustion - if len(search) == 0 { - i.node = n - return - } - - // Look for an edge - _, n = n.getEdge(search[0]) - if n == nil { - i.node = nil - return - } - - // Update to the finest granularity as the search makes progress - watch = n.mutateCh - - // Consume the search prefix - if bytes.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if bytes.HasPrefix(n.prefix, search) { - i.node = n - return - } else { - i.node = nil - return - } - } -} - -// SeekPrefix is used to seek the iterator to a given prefix -func (i *Iterator) 
SeekPrefix(prefix []byte) { - i.SeekPrefixWatch(prefix) -} - -func (i *Iterator) recurseMin(n *Node) *Node { - // Traverse to the minimum child - if n.leaf != nil { - return n - } - nEdges := len(n.edges) - if nEdges > 1 { - // Add all the other edges to the stack (the min node will be added as - // we recurse) - i.stack = append(i.stack, n.edges[1:]) - } - if nEdges > 0 { - return i.recurseMin(n.edges[0].node) - } - // Shouldn't be possible - return nil -} - -// SeekLowerBound is used to seek the iterator to the smallest key that is -// greater or equal to the given key. There is no watch variant as it's hard to -// predict based on the radix structure which node(s) changes might affect the -// result. -func (i *Iterator) SeekLowerBound(key []byte) { - // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we - // go because we need only a subset of edges of many nodes in the path to the - // leaf with the lower bound. Note that the iterator will still recurse into - // children that we don't traverse on the way to the reverse lower bound as it - // walks the stack. - i.stack = []edges{} - // i.node starts off in the common case as pointing to the root node of the - // tree. By the time we return we have either found a lower bound and setup - // the stack to traverse all larger keys, or we have not and the stack and - // node should both be nil to prevent the iterator from assuming it is just - // iterating the whole tree from the root node. Either way this needs to end - // up as nil so just set it here. - n := i.node - i.node = nil - search := key - - found := func(n *Node) { - i.stack = append(i.stack, edges{edge{node: n}}) - } - - findMin := func(n *Node) { - n = i.recurseMin(n) - if n != nil { - found(n) - return - } - } - - for { - // Compare current prefix with the search key's same-length prefix. 
- var prefixCmp int - if len(n.prefix) < len(search) { - prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) - } else { - prefixCmp = bytes.Compare(n.prefix, search) - } - - if prefixCmp > 0 { - // Prefix is larger, that means the lower bound is greater than the search - // and from now on we need to follow the minimum path to the smallest - // leaf under this subtree. - findMin(n) - return - } - - if prefixCmp < 0 { - // Prefix is smaller than search prefix, that means there is no lower - // bound - i.node = nil - return - } - - // Prefix is equal, we are still heading for an exact match. If this is a - // leaf and an exact match we're done. - if n.leaf != nil && bytes.Equal(n.leaf.key, key) { - found(n) - return - } - - // Consume the search prefix if the current node has one. Note that this is - // safe because if n.prefix is longer than the search slice prefixCmp would - // have been > 0 above and the method would have already returned. - search = search[len(n.prefix):] - - if len(search) == 0 { - // We've exhausted the search key, but the current node is not an exact - // match or not a leaf. That means that the leaf value if it exists, and - // all child nodes must be strictly greater, the smallest key in this - // subtree must be the lower bound. - findMin(n) - return - } - - // Otherwise, take the lower bound next edge. - idx, lbNode := n.getLowerBoundEdge(search[0]) - if lbNode == nil { - return - } - - // Create stack edges for the all strictly higher edges in this node. 
- if idx+1 < len(n.edges) { - i.stack = append(i.stack, n.edges[idx+1:]) - } - - // Recurse - n = lbNode - } -} - -// Next returns the next node in order -func (i *Iterator) Next() ([]byte, interface{}, bool) { - // Initialize our stack if needed - if i.stack == nil && i.node != nil { - i.stack = []edges{ - { - edge{node: i.node}, - }, - } - } - - for len(i.stack) > 0 { - // Inspect the last element of the stack - n := len(i.stack) - last := i.stack[n-1] - elem := last[0].node - - // Update the stack - if len(last) > 1 { - i.stack[n-1] = last[1:] - } else { - i.stack = i.stack[:n-1] - } - - // Push the edges onto the frontier - if len(elem.edges) > 0 { - i.stack = append(i.stack, elem.edges) - } - - // Return the leaf values if any - if elem.leaf != nil { - return elem.leaf.key, elem.leaf.val, true - } - } - return nil, nil, false -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go deleted file mode 100644 index 35985480872..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/node.go +++ /dev/null @@ -1,334 +0,0 @@ -package iradix - -import ( - "bytes" - "sort" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type WalkFn func(k []byte, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - mutateCh chan struct{} - key []byte - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *Node -} - -// Node is an immutable node in the radix tree -type Node struct { - // mutateCh is closed if this node is modified - mutateCh chan struct{} - - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix []byte - - // Edges should be stored in-order for iteration. 
- // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *Node) isLeaf() bool { - return n.leaf != nil -} - -func (n *Node) addEdge(e edge) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= e.label - }) - n.edges = append(n.edges, e) - if idx != num { - copy(n.edges[idx+1:], n.edges[idx:num]) - n.edges[idx] = e - } -} - -func (n *Node) replaceEdge(e edge) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= e.label - }) - if idx < num && n.edges[idx].label == e.label { - n.edges[idx].node = e.node - return - } - panic("replacing missing edge") -} - -func (n *Node) getEdge(label byte) (int, *Node) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return idx, n.edges[idx].node - } - return -1, nil -} - -func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - // we want lower bound behavior so return even if it's not an exact match - if idx < num { - return idx, n.edges[idx].node - } - return -1, nil -} - -func (n *Node) delEdge(label byte) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - copy(n.edges[idx:], n.edges[idx+1:]) - n.edges[len(n.edges)-1] = edge{} - n.edges = n.edges[:len(n.edges)-1] - } -} - -func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { - search := k - watch := n.mutateCh - for { - // Check for key exhaustion - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.mutateCh, n.leaf.val, true - } - break - } - - // Look for an edge - _, n = n.getEdge(search[0]) - if n == nil { - break - } - - // Update to the finest granularity as the 
search makes progress - watch = n.mutateCh - - // Consume the search prefix - if bytes.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return watch, nil, false -} - -func (n *Node) Get(k []byte) (interface{}, bool) { - _, val, ok := n.GetWatch(k) - return val, ok -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. -func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) { - var last *leafNode - search := k - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaution - if len(search) == 0 { - break - } - - // Look for an edge - _, n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if bytes.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return nil, nil, false -} - -// Minimum is used to return the minimum value in the tree -func (n *Node) Minimum() ([]byte, interface{}, bool) { - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return nil, nil, false -} - -// Maximum is used to return the maximum value in the tree -func (n *Node) Maximum() ([]byte, interface{}, bool) { - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } else { - break - } - } - return nil, nil, false -} - -// Iterator is used to return an iterator at -// the given node to walk the tree -func (n *Node) Iterator() *Iterator { - return &Iterator{node: n} -} - -// ReverseIterator is used to return an iterator at -// the given node to walk the tree backwards -func (n *Node) ReverseIterator() *ReverseIterator { - return NewReverseIterator(n) -} - -// rawIterator is used to return a raw iterator at the given node to walk the -// 
tree. -func (n *Node) rawIterator() *rawIterator { - iter := &rawIterator{node: n} - iter.Next() - return iter -} - -// Walk is used to walk the tree -func (n *Node) Walk(fn WalkFn) { - recursiveWalk(n, fn) -} - -// WalkBackwards is used to walk the tree in reverse order -func (n *Node) WalkBackwards(fn WalkFn) { - reverseRecursiveWalk(n, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { - search := prefix - for { - // Check for key exhaution - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - _, n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if bytes.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if bytes.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. -func (n *Node) WalkPath(path []byte, fn WalkFn) { - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaution - if len(search) == 0 { - return - } - - // Look for an edge - _, n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if bytes.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. 
Returns true if the walk should be aborted -func recursiveWalk(n *Node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// reverseRecursiveWalk is used to do a reverse pre-order -// walk of a node recursively. Returns true if the walk -// should be aborted -func reverseRecursiveWalk(n *Node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children in reverse order - for i := len(n.edges) - 1; i >= 0; i-- { - e := n.edges[i] - if reverseRecursiveWalk(e.node, fn) { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go deleted file mode 100644 index 3c6a22525c8..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go +++ /dev/null @@ -1,78 +0,0 @@ -package iradix - -// rawIterator visits each of the nodes in the tree, even the ones that are not -// leaves. It keeps track of the effective path (what a leaf at a given node -// would be called), which is useful for comparing trees. -type rawIterator struct { - // node is the starting node in the tree for the iterator. - node *Node - - // stack keeps track of edges in the frontier. - stack []rawStackEntry - - // pos is the current position of the iterator. - pos *Node - - // path is the effective path of the current iterator position, - // regardless of whether the current node is a leaf. - path string -} - -// rawStackEntry is used to keep track of the cumulative common path as well as -// its associated edges in the frontier. -type rawStackEntry struct { - path string - edges edges -} - -// Front returns the current node that has been iterated to. 
-func (i *rawIterator) Front() *Node { - return i.pos -} - -// Path returns the effective path of the current node, even if it's not actually -// a leaf. -func (i *rawIterator) Path() string { - return i.path -} - -// Next advances the iterator to the next node. -func (i *rawIterator) Next() { - // Initialize our stack if needed. - if i.stack == nil && i.node != nil { - i.stack = []rawStackEntry{ - { - edges: edges{ - edge{node: i.node}, - }, - }, - } - } - - for len(i.stack) > 0 { - // Inspect the last element of the stack. - n := len(i.stack) - last := i.stack[n-1] - elem := last.edges[0].node - - // Update the stack. - if len(last.edges) > 1 { - i.stack[n-1].edges = last.edges[1:] - } else { - i.stack = i.stack[:n-1] - } - - // Push the edges onto the frontier. - if len(elem.edges) > 0 { - path := last.path + string(elem.prefix) - i.stack = append(i.stack, rawStackEntry{path, elem.edges}) - } - - i.pos = elem - i.path = last.path + string(elem.prefix) - return - } - - i.pos = nil - i.path = "" -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go deleted file mode 100644 index 554fa7129c1..00000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go +++ /dev/null @@ -1,239 +0,0 @@ -package iradix - -import ( - "bytes" -) - -// ReverseIterator is used to iterate over a set of nodes -// in reverse in-order -type ReverseIterator struct { - i *Iterator - - // expandedParents stores the set of parent nodes whose relevant children have - // already been pushed into the stack. This can happen during seek or during - // iteration. - // - // Unlike forward iteration we need to recurse into children before we can - // output the value stored in an internal leaf since all children are greater. - // We use this to track whether we have already ensured all the children are - // in the stack. 
- expandedParents map[*Node]struct{} -} - -// NewReverseIterator returns a new ReverseIterator at a node -func NewReverseIterator(n *Node) *ReverseIterator { - return &ReverseIterator{ - i: &Iterator{node: n}, - } -} - -// SeekPrefixWatch is used to seek the iterator to a given prefix -// and returns the watch channel of the finest granularity -func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { - return ri.i.SeekPrefixWatch(prefix) -} - -// SeekPrefix is used to seek the iterator to a given prefix -func (ri *ReverseIterator) SeekPrefix(prefix []byte) { - ri.i.SeekPrefixWatch(prefix) -} - -// SeekReverseLowerBound is used to seek the iterator to the largest key that is -// lower or equal to the given key. There is no watch variant as it's hard to -// predict based on the radix structure which node(s) changes might affect the -// result. -func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { - // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we - // go because we need only a subset of edges of many nodes in the path to the - // leaf with the lower bound. Note that the iterator will still recurse into - // children that we don't traverse on the way to the reverse lower bound as it - // walks the stack. - ri.i.stack = []edges{} - // ri.i.node starts off in the common case as pointing to the root node of the - // tree. By the time we return we have either found a lower bound and setup - // the stack to traverse all larger keys, or we have not and the stack and - // node should both be nil to prevent the iterator from assuming it is just - // iterating the whole tree from the root node. Either way this needs to end - // up as nil so just set it here. 
- n := ri.i.node - ri.i.node = nil - search := key - - if ri.expandedParents == nil { - ri.expandedParents = make(map[*Node]struct{}) - } - - found := func(n *Node) { - ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) - // We need to mark this node as expanded in advance too otherwise the - // iterator will attempt to walk all of its children even though they are - // greater than the lower bound we have found. We've expanded it in the - // sense that all of its children that we want to walk are already in the - // stack (i.e. none of them). - ri.expandedParents[n] = struct{}{} - } - - for { - // Compare current prefix with the search key's same-length prefix. - var prefixCmp int - if len(n.prefix) < len(search) { - prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) - } else { - prefixCmp = bytes.Compare(n.prefix, search) - } - - if prefixCmp < 0 { - // Prefix is smaller than search prefix, that means there is no exact - // match for the search key. But we are looking in reverse, so the reverse - // lower bound will be the largest leaf under this subtree, since it is - // the value that would come right before the current search key if it - // were in the tree. So we need to follow the maximum path in this subtree - // to find it. Note that this is exactly what the iterator will already do - // if it finds a node in the stack that has _not_ been marked as expanded - // so in this one case we don't call `found` and instead let the iterator - // do the expansion and recursion through all the children. - ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) - return - } - - if prefixCmp > 0 { - // Prefix is larger than search prefix, or there is no prefix but we've - // also exhausted the search key. Either way, that means there is no - // reverse lower bound since nothing comes before our current search - // prefix. - return - } - - // If this is a leaf, something needs to happen! 
Note that if it's a leaf - // and prefixCmp was zero (which it must be to get here) then the leaf value - // is either an exact match for the search, or it's lower. It can't be - // greater. - if n.isLeaf() { - - // Firstly, if it's an exact match, we're done! - if bytes.Equal(n.leaf.key, key) { - found(n) - return - } - - // It's not so this node's leaf value must be lower and could still be a - // valid contender for reverse lower bound. - - // If it has no children then we are also done. - if len(n.edges) == 0 { - // This leaf is the lower bound. - found(n) - return - } - - // Finally, this leaf is internal (has children) so we'll keep searching, - // but we need to add it to the iterator's stack since it has a leaf value - // that needs to be iterated over. It needs to be added to the stack - // before its children below as it comes first. - ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) - // We also need to mark it as expanded since we'll be adding any of its - // relevant children below and so don't want the iterator to re-add them - // on its way back up the stack. - ri.expandedParents[n] = struct{}{} - } - - // Consume the search prefix. Note that this is safe because if n.prefix is - // longer than the search slice prefixCmp would have been > 0 above and the - // method would have already returned. - search = search[len(n.prefix):] - - if len(search) == 0 { - // We've exhausted the search key but we are not at a leaf. That means all - // children are greater than the search key so a reverse lower bound - // doesn't exist in this subtree. Note that there might still be one in - // the whole radix tree by following a different path somewhere further - // up. If that's the case then the iterator's stack will contain all the - // smaller nodes already and Previous will walk through them correctly. - return - } - - // Otherwise, take the lower bound next edge. 
- idx, lbNode := n.getLowerBoundEdge(search[0]) - - // From here, we need to update the stack with all values lower than - // the lower bound edge. Since getLowerBoundEdge() returns -1 when the - // search prefix is larger than all edges, we need to place idx at the - // last edge index so they can all be place in the stack, since they - // come before our search prefix. - if idx == -1 { - idx = len(n.edges) - } - - // Create stack edges for the all strictly lower edges in this node. - if len(n.edges[:idx]) > 0 { - ri.i.stack = append(ri.i.stack, n.edges[:idx]) - } - - // Exit if there's no lower bound edge. The stack will have the previous - // nodes already. - if lbNode == nil { - return - } - - // Recurse - n = lbNode - } -} - -// Previous returns the previous node in reverse order -func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { - // Initialize our stack if needed - if ri.i.stack == nil && ri.i.node != nil { - ri.i.stack = []edges{ - { - edge{node: ri.i.node}, - }, - } - } - - if ri.expandedParents == nil { - ri.expandedParents = make(map[*Node]struct{}) - } - - for len(ri.i.stack) > 0 { - // Inspect the last element of the stack - n := len(ri.i.stack) - last := ri.i.stack[n-1] - m := len(last) - elem := last[m-1].node - - _, alreadyExpanded := ri.expandedParents[elem] - - // If this is an internal node and we've not seen it already, we need to - // leave it in the stack so we can return its possible leaf value _after_ - // we've recursed through all its children. - if len(elem.edges) > 0 && !alreadyExpanded { - // record that we've seen this node! - ri.expandedParents[elem] = struct{}{} - // push child edges onto stack and skip the rest of the loop to recurse - // into the largest one. 
- ri.i.stack = append(ri.i.stack, elem.edges) - continue - } - - // Remove the node from the stack - if m > 1 { - ri.i.stack[n-1] = last[:m-1] - } else { - ri.i.stack = ri.i.stack[:n-1] - } - // We don't need this state any more as it's no longer in the stack so we - // won't visit it again - if alreadyExpanded { - delete(ri.expandedParents, elem) - } - - // If this is a leaf, return it - if elem.leaf != nil { - return elem.leaf.key, elem.leaf.val, true - } - - // it's not a leaf so keep walking the stack to find the previous leaf - } - return nil, nil, false -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml deleted file mode 100644 index 80e1de44e96..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.6 - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. 
- - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile deleted file mode 100644 index c3989e789f6..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -TEST?=./... - -test: - go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 - go vet $(TEST) - go test $(TEST) -race - -.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md deleted file mode 100644 index 6a128e1e14a..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# rootcerts - -Functions for loading root certificates for TLS connections. - ------ - -Go's standard library `crypto/tls` provides a common mechanism for configuring -TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool -of certificates for the client to use as a trust store when verifying server -certificates. - -This library contains utility functions for loading certificates destined for -that field, as well as one other important thing: - -When the `RootCAs` field is `nil`, the standard library attempts to load the -host's root CA set. This behavior is OS-specific, and the Darwin -implementation contains [a bug that prevents trusted certificates from the -System and Login keychains from being loaded][1]. This library contains -Darwin-specific behavior that works around that bug. 
- -[1]: https://github.com/golang/go/issues/14514 - -## Example Usage - -Here's a snippet demonstrating how this library is meant to be used: - -```go -func httpClient() (*http.Client, error) - tlsConfig := &tls.Config{} - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CAFile: os.Getenv("MYAPP_CAFILE"), - CAPath: os.Getenv("MYAPP_CAPATH"), - Certificate: os.Getenv("MYAPP_CERTIFICATE"), - }) - if err != nil { - return nil, err - } - c := cleanhttp.DefaultClient() - t := cleanhttp.DefaultTransport() - t.TLSClientConfig = tlsConfig - c.Transport = t - return c, nil -} -``` diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go deleted file mode 100644 index b55cc628485..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package rootcerts contains functions to aid in loading CA certificates for -// TLS connections. -// -// In addition, its default behavior on Darwin works around an open issue [1] -// in Go's crypto/x509 that prevents certicates from being loaded from the -// System or Login keychains. -// -// [1] https://github.com/golang/go/issues/14514 -package rootcerts diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go deleted file mode 100644 index 69aabd6bc74..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go +++ /dev/null @@ -1,123 +0,0 @@ -package rootcerts - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// Config determines where LoadCACerts will load certificates from. When CAFile, -// CACertificate and CAPath are blank, this library's functions will either load -// system roots explicitly and return them, or set the CertPool to nil to allow -// Go's standard library to load system certs. -type Config struct { - // CAFile is a path to a PEM-encoded certificate file or bundle. 
Takes - // precedence over CACertificate and CAPath. - CAFile string - - // CACertificate is a PEM-encoded certificate or bundle. Takes precedence - // over CAPath. - CACertificate []byte - - // CAPath is a path to a directory populated with PEM-encoded certificates. - CAPath string -} - -// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the -// Config specified. -func ConfigureTLS(t *tls.Config, c *Config) error { - if t == nil { - return nil - } - pool, err := LoadCACerts(c) - if err != nil { - return err - } - t.RootCAs = pool - return nil -} - -// LoadCACerts loads a CertPool based on the Config specified. -func LoadCACerts(c *Config) (*x509.CertPool, error) { - if c == nil { - c = &Config{} - } - if c.CAFile != "" { - return LoadCAFile(c.CAFile) - } - if len(c.CACertificate) != 0 { - return AppendCertificate(c.CACertificate) - } - if c.CAPath != "" { - return LoadCAPath(c.CAPath) - } - - return LoadSystemCAs() -} - -// LoadCAFile loads a single PEM-encoded file from the path specified. -func LoadCAFile(caFile string) (*x509.CertPool, error) { - pool := x509.NewCertPool() - - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Error loading CA File: %s", err) - } - - ok := pool.AppendCertsFromPEM(pem) - if !ok { - return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile) - } - - return pool, nil -} - -// AppendCertificate appends an in-memory PEM-encoded certificate or bundle and returns a pool. -func AppendCertificate(ca []byte) (*x509.CertPool, error) { - pool := x509.NewCertPool() - - ok := pool.AppendCertsFromPEM(ca) - if !ok { - return nil, errors.New("Error appending CA: Couldn't parse PEM") - } - - return pool, nil -} - -// LoadCAPath walks the provided path and loads all certificates encounted into -// a pool. 
-func LoadCAPath(caPath string) (*x509.CertPool, error) { - pool := x509.NewCertPool() - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - pem, err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("Error loading file from CAPath: %s", err) - } - - ok := pool.AppendCertsFromPEM(pem) - if !ok { - return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path) - } - - return nil - } - - err := filepath.Walk(caPath, walkFn) - if err != nil { - return nil, err - } - - return pool, nil -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go deleted file mode 100644 index 66b1472c4a0..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !darwin - -package rootcerts - -import "crypto/x509" - -// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that -// default behavior of standard TLS config libraries is triggered, which is to -// load system certs. 
-func LoadSystemCAs() (*x509.CertPool, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go deleted file mode 100644 index a9a040657fe..00000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go +++ /dev/null @@ -1,48 +0,0 @@ -package rootcerts - -import ( - "crypto/x509" - "os/exec" - "path" - - "github.com/mitchellh/go-homedir" -) - -// LoadSystemCAs has special behavior on Darwin systems to work around -func LoadSystemCAs() (*x509.CertPool, error) { - pool := x509.NewCertPool() - - for _, keychain := range certKeychains() { - err := addCertsFromKeychain(pool, keychain) - if err != nil { - return nil, err - } - } - - return pool, nil -} - -func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { - cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) - data, err := cmd.Output() - if err != nil { - return err - } - - pool.AppendCertsFromPEM(data) - - return nil -} - -func certKeychains() []string { - keychains := []string{ - "/System/Library/Keychains/SystemRootCertificates.keychain", - "/Library/Keychains/System.keychain", - } - home, err := homedir.Dir() - if err == nil { - loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") - keychains = append(keychains, loginKeychain) - } - return keychains -} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index 0e5d580e0e9..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,364 +0,0 @@ -Copyright (c) 2014 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
"Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. 
"Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. 
- Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. 
Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. 
However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 9233583c91c..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,177 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache. -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) (evicted bool) { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. 
-func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - if ent.Value.(*entry) == nil { - return nil, false - } - return ent.Value.(*entry).value, true - } - return -} - -// Contains checks if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element - if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) (present bool) { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() -} - -// Resize changes the cache size. 
-func (c *LRU) Resize(size int) (evicted int) { - diff := c.Len() - size - if diff < 0 { - diff = 0 - } - for i := 0; i < diff; i++ { - c.removeOldest() - } - c.size = size - return diff -} - -// removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - if c.onEvict != nil { - c.onEvict(kv.key, kv.value) - } -} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go deleted file mode 100644 index cb7f8caf03d..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package simplelru provides simple LRU implementation based on build-in container/list. -package simplelru - -// LRUCache is the interface for simple LRU cache. -type LRUCache interface { - // Adds a value to the cache, returns true if an eviction occurred and - // updates the "recently used"-ness of the key. - Add(key, value interface{}) bool - - // Returns key's value from the cache and - // updates the "recently used"-ness of the key. #value, isFound - Get(key interface{}) (value interface{}, ok bool) - - // Checks if a key exists in cache without updating the recent-ness. - Contains(key interface{}) (ok bool) - - // Returns key's value without updating the "recently used"-ness of the key. - Peek(key interface{}) (value interface{}, ok bool) - - // Removes a key from the cache. - Remove(key interface{}) bool - - // Removes the oldest entry from cache. - RemoveOldest() (interface{}, interface{}, bool) - - // Returns the oldest entry from the cache. 
#key, value, isFound - GetOldest() (interface{}, interface{}, bool) - - // Returns a slice of the keys in the cache, from oldest to newest. - Keys() []interface{} - - // Returns the number of items in the cache. - Len() int - - // Clears all cache entries. - Purge() - - // Resizes cache, returning number evicted - Resize(int) int -} diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/vendor/github.com/hashicorp/serf/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. 
“Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. 
Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. 
Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go deleted file mode 100644 index 32124a73a20..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/client.go +++ /dev/null @@ -1,243 +0,0 @@ -package coordinate - -import ( - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/armon/go-metrics" -) - -// Client manages the estimated network coordinate for a given node, and adjusts -// it as the node observes round trip times and estimated coordinates from other -// nodes. The core algorithm is based on Vivaldi, see the documentation for Config -// for more details. -type Client struct { - // coord is the current estimate of the client's network coordinate. - coord *Coordinate - - // origin is a coordinate sitting at the origin. - origin *Coordinate - - // config contains the tuning parameters that govern the performance of - // the algorithm. - config *Config - - // adjustmentIndex is the current index into the adjustmentSamples slice. - adjustmentIndex uint - - // adjustment is used to store samples for the adjustment calculation. - adjustmentSamples []float64 - - // latencyFilterSamples is used to store the last several RTT samples, - // keyed by node name. We will use the config's LatencyFilterSamples - // value to determine how many samples we keep, per node. 
- latencyFilterSamples map[string][]float64 - - // stats is used to record events that occur when updating coordinates. - stats ClientStats - - // mutex enables safe concurrent access to the client. - mutex sync.RWMutex -} - -// ClientStats is used to record events that occur when updating coordinates. -type ClientStats struct { - // Resets is incremented any time we reset our local coordinate because - // our calculations have resulted in an invalid state. - Resets int -} - -// NewClient creates a new Client and verifies the configuration is valid. -func NewClient(config *Config) (*Client, error) { - if !(config.Dimensionality > 0) { - return nil, fmt.Errorf("dimensionality must be >0") - } - - return &Client{ - coord: NewCoordinate(config), - origin: NewCoordinate(config), - config: config, - adjustmentIndex: 0, - adjustmentSamples: make([]float64, config.AdjustmentWindowSize), - latencyFilterSamples: make(map[string][]float64), - }, nil -} - -// GetCoordinate returns a copy of the coordinate for this client. -func (c *Client) GetCoordinate() *Coordinate { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.Clone() -} - -// SetCoordinate forces the client's coordinate to a known state. -func (c *Client) SetCoordinate(coord *Coordinate) error { - c.mutex.Lock() - defer c.mutex.Unlock() - - if err := c.checkCoordinate(coord); err != nil { - return err - } - - c.coord = coord.Clone() - return nil -} - -// ForgetNode removes any client state for the given node. -func (c *Client) ForgetNode(node string) { - c.mutex.Lock() - defer c.mutex.Unlock() - - delete(c.latencyFilterSamples, node) -} - -// Stats returns a copy of stats for the client. -func (c *Client) Stats() ClientStats { - c.mutex.Lock() - defer c.mutex.Unlock() - - return c.stats -} - -// checkCoordinate returns an error if the coordinate isn't compatible with -// this client, or if the coordinate itself isn't valid. This assumes the mutex -// has been locked already. 
-func (c *Client) checkCoordinate(coord *Coordinate) error { - if !c.coord.IsCompatibleWith(coord) { - return fmt.Errorf("dimensions aren't compatible") - } - - if !coord.IsValid() { - return fmt.Errorf("coordinate is invalid") - } - - return nil -} - -// latencyFilter applies a simple moving median filter with a new sample for -// a node. This assumes that the mutex has been locked already. -func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { - samples, ok := c.latencyFilterSamples[node] - if !ok { - samples = make([]float64, 0, c.config.LatencyFilterSize) - } - - // Add the new sample and trim the list, if needed. - samples = append(samples, rttSeconds) - if len(samples) > int(c.config.LatencyFilterSize) { - samples = samples[1:] - } - c.latencyFilterSamples[node] = samples - - // Sort a copy of the samples and return the median. - sorted := make([]float64, len(samples)) - copy(sorted, samples) - sort.Float64s(sorted) - return sorted[len(sorted)/2] -} - -// updateVivialdi updates the Vivaldi portion of the client's coordinate. This -// assumes that the mutex has been locked already. -func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { - const zeroThreshold = 1.0e-6 - - dist := c.coord.DistanceTo(other).Seconds() - if rttSeconds < zeroThreshold { - rttSeconds = zeroThreshold - } - wrongness := math.Abs(dist-rttSeconds) / rttSeconds - - totalError := c.coord.Error + other.Error - if totalError < zeroThreshold { - totalError = zeroThreshold - } - weight := c.coord.Error / totalError - - c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) - if c.coord.Error > c.config.VivaldiErrorMax { - c.coord.Error = c.config.VivaldiErrorMax - } - - delta := c.config.VivaldiCC * weight - force := delta * (rttSeconds - dist) - c.coord = c.coord.ApplyForce(c.config, force, other) -} - -// updateAdjustment updates the adjustment portion of the client's coordinate, if -// the feature is enabled. 
This assumes that the mutex has been locked already. -func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { - if c.config.AdjustmentWindowSize == 0 { - return - } - - // Note that the existing adjustment factors don't figure in to this - // calculation so we use the raw distance here. - dist := c.coord.rawDistanceTo(other) - c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist - c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize - - sum := 0.0 - for _, sample := range c.adjustmentSamples { - sum += sample - } - c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) -} - -// updateGravity applies a small amount of gravity to pull coordinates towards -// the center of the coordinate system to combat drift. This assumes that the -// mutex is locked already. -func (c *Client) updateGravity() { - dist := c.origin.DistanceTo(c.coord).Seconds() - force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) - c.coord = c.coord.ApplyForce(c.config, force, c.origin) -} - -// Update takes other, a coordinate for another node, and rtt, a round trip -// time observation for a ping to that node, and updates the estimated position of -// the client's coordinate. Returns the updated coordinate. -func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { - c.mutex.Lock() - defer c.mutex.Unlock() - - if err := c.checkCoordinate(other); err != nil { - return nil, err - } - - // The code down below can handle zero RTTs, which we have seen in - // https://github.com/hashicorp/consul/issues/3789, presumably in - // environments with coarse-grained monotonic clocks (we are still - // trying to pin this down). In any event, this is ok from a code PoV - // so we don't need to alert operators with spammy messages. We did - // add a counter so this is still observable, though. 
- const maxRTT = 10 * time.Second - if rtt < 0 || rtt > maxRTT { - return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) - } - if rtt == 0 { - metrics.IncrCounterWithLabels([]string{"serf", "coordinate", "zero-rtt"}, 1, c.config.MetricLabels) - } - - rttSeconds := c.latencyFilter(node, rtt.Seconds()) - c.updateVivaldi(other, rttSeconds) - c.updateAdjustment(other, rttSeconds) - c.updateGravity() - if !c.coord.IsValid() { - c.stats.Resets++ - c.coord = NewCoordinate(c.config) - } - - return c.coord.Clone(), nil -} - -// DistanceTo returns the estimated RTT from the client's coordinate to other, the -// coordinate for another node. -func (c *Client) DistanceTo(other *Coordinate) time.Duration { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.DistanceTo(other) -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go deleted file mode 100644 index 09c0cafe830..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/config.go +++ /dev/null @@ -1,77 +0,0 @@ -package coordinate - -import ( - "github.com/armon/go-metrics" -) - -// Config is used to set the parameters of the Vivaldi-based coordinate mapping -// algorithm. -// -// The following references are called out at various points in the documentation -// here: -// -// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." -// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. -// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates -// in the Wild." NSDI. Vol. 7. 2007. -// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for -// host-based network coordinate systems." Networking, IEEE/ACM Transactions -// on 18.1 (2010): 27-40. -type Config struct { - // The dimensionality of the coordinate system. 
As discussed in [2], more - // dimensions improves the accuracy of the estimates up to a point. Per [2] - // we chose 8 dimensions plus a non-Euclidean height. - Dimensionality uint - - // VivaldiErrorMax is the default error value when a node hasn't yet made - // any observations. It also serves as an upper limit on the error value in - // case observations cause the error value to increase without bound. - VivaldiErrorMax float64 - - // VivaldiCE is a tuning factor that controls the maximum impact an - // observation can have on a node's confidence. See [1] for more details. - VivaldiCE float64 - - // VivaldiCC is a tuning factor that controls the maximum impact an - // observation can have on a node's coordinate. See [1] for more details. - VivaldiCC float64 - - // AdjustmentWindowSize is a tuning factor that determines how many samples - // we retain to calculate the adjustment factor as discussed in [3]. Setting - // this to zero disables this feature. - AdjustmentWindowSize uint - - // HeightMin is the minimum value of the height parameter. Since this - // always must be positive, it will introduce a small amount error, so - // the chosen value should be relatively small compared to "normal" - // coordinates. - HeightMin float64 - - // LatencyFilterSamples is the maximum number of samples that are retained - // per node, in order to compute a median. The intent is to ride out blips - // but still keep the delay low, since our time to probe any given node is - // pretty infrequent. See [2] for more details. - LatencyFilterSize uint - - // GravityRho is a tuning factor that sets how much gravity has an effect - // to try to re-center coordinates. See [2] for more details. - GravityRho float64 - - // metricLabels is the slice of labels to put on all emitted metrics - MetricLabels []metrics.Label -} - -// DefaultConfig returns a Config that has some default values suitable for -// basic testing of the algorithm, but not tuned to any particular type of cluster. 
-func DefaultConfig() *Config { - return &Config{ - Dimensionality: 8, - VivaldiErrorMax: 1.5, - VivaldiCE: 0.25, - VivaldiCC: 0.25, - AdjustmentWindowSize: 20, - HeightMin: 10.0e-6, - LatencyFilterSize: 3, - GravityRho: 150.0, - } -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go deleted file mode 100644 index fbe792c90d4..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go +++ /dev/null @@ -1,203 +0,0 @@ -package coordinate - -import ( - "math" - "math/rand" - "time" -) - -// Coordinate is a specialized structure for holding network coordinates for the -// Vivaldi-based coordinate mapping algorithm. All of the fields should be public -// to enable this to be serialized. All values in here are in units of seconds. -type Coordinate struct { - // Vec is the Euclidean portion of the coordinate. This is used along - // with the other fields to provide an overall distance estimate. The - // units here are seconds. - Vec []float64 - - // Err reflects the confidence in the given coordinate and is updated - // dynamically by the Vivaldi Client. This is dimensionless. - Error float64 - - // Adjustment is a distance offset computed based on a calculation over - // observations from all other nodes over a fixed window and is updated - // dynamically by the Vivaldi Client. The units here are seconds. - Adjustment float64 - - // Height is a distance offset that accounts for non-Euclidean effects - // which model the access links from nodes to the core Internet. The access - // links are usually set by bandwidth and congestion, and the core links - // usually follow distance based on geography. - Height float64 -} - -const ( - // secondsToNanoseconds is used to convert float seconds to nanoseconds. - secondsToNanoseconds = 1.0e9 - - // zeroThreshold is used to decide if two coordinates are on top of each - // other. 
- zeroThreshold = 1.0e-6 -) - -// ErrDimensionalityConflict will be panic-d if you try to perform operations -// with incompatible dimensions. -type DimensionalityConflictError struct{} - -// Adds the error interface. -func (e DimensionalityConflictError) Error() string { - return "coordinate dimensionality does not match" -} - -// NewCoordinate creates a new coordinate at the origin, using the given config -// to supply key initial values. -func NewCoordinate(config *Config) *Coordinate { - return &Coordinate{ - Vec: make([]float64, config.Dimensionality), - Error: config.VivaldiErrorMax, - Adjustment: 0.0, - Height: config.HeightMin, - } -} - -// Clone creates an independent copy of this coordinate. -func (c *Coordinate) Clone() *Coordinate { - vec := make([]float64, len(c.Vec)) - copy(vec, c.Vec) - return &Coordinate{ - Vec: vec, - Error: c.Error, - Adjustment: c.Adjustment, - Height: c.Height, - } -} - -// componentIsValid returns false if a floating point value is a NaN or an -// infinity. -func componentIsValid(f float64) bool { - return !math.IsInf(f, 0) && !math.IsNaN(f) -} - -// IsValid returns false if any component of a coordinate isn't valid, per the -// componentIsValid() helper above. -func (c *Coordinate) IsValid() bool { - for i := range c.Vec { - if !componentIsValid(c.Vec[i]) { - return false - } - } - - return componentIsValid(c.Error) && - componentIsValid(c.Adjustment) && - componentIsValid(c.Height) -} - -// IsCompatibleWith checks to see if the two coordinates are compatible -// dimensionally. If this returns true then you are guaranteed to not get -// any runtime errors operating on them. -func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { - return len(c.Vec) == len(other.Vec) -} - -// ApplyForce returns the result of applying the force from the direction of the -// other coordinate. 
-func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - ret := c.Clone() - unit, mag := unitVectorAt(c.Vec, other.Vec) - ret.Vec = add(ret.Vec, mul(unit, force)) - if mag > zeroThreshold { - ret.Height = (ret.Height+other.Height)*force/mag + ret.Height - ret.Height = math.Max(ret.Height, config.HeightMin) - } - return ret -} - -// DistanceTo returns the distance between this coordinate and the other -// coordinate, including adjustments. -func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - dist := c.rawDistanceTo(other) - adjustedDist := dist + c.Adjustment + other.Adjustment - if adjustedDist > 0.0 { - dist = adjustedDist - } - return time.Duration(dist * secondsToNanoseconds) -} - -// rawDistanceTo returns the Vivaldi distance between this coordinate and the -// other coordinate in seconds, not including adjustments. This assumes the -// dimensions have already been checked to be compatible. -func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { - return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height -} - -// add returns the sum of vec1 and vec2. This assumes the dimensions have -// already been checked to be compatible. -func add(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i := range ret { - ret[i] = vec1[i] + vec2[i] - } - return ret -} - -// diff returns the difference between the vec1 and vec2. This assumes the -// dimensions have already been checked to be compatible. -func diff(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i := range ret { - ret[i] = vec1[i] - vec2[i] - } - return ret -} - -// mul returns vec multiplied by a scalar factor. 
-func mul(vec []float64, factor float64) []float64 { - ret := make([]float64, len(vec)) - for i := range vec { - ret[i] = vec[i] * factor - } - return ret -} - -// magnitude computes the magnitude of the vec. -func magnitude(vec []float64) float64 { - sum := 0.0 - for i := range vec { - sum += vec[i] * vec[i] - } - return math.Sqrt(sum) -} - -// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two -// positions are the same then a random unit vector is returned. We also return -// the distance between the points for use in the later height calculation. -func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { - ret := diff(vec1, vec2) - - // If the coordinates aren't on top of each other we can normalize. - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), mag - } - - // Otherwise, just return a random unit vector. - for i := range ret { - ret[i] = rand.Float64() - 0.5 - } - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), 0.0 - } - - // And finally just give up and make a unit vector along the first - // dimension. This should be exceedingly rare. - ret = make([]float64, len(ret)) - ret[0] = 1.0 - return ret, 0.0 -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go deleted file mode 100644 index 66da4e2e92e..00000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go +++ /dev/null @@ -1,187 +0,0 @@ -package coordinate - -import ( - "fmt" - "math" - "math/rand" - "time" -) - -// GenerateClients returns a slice with nodes number of clients, all with the -// given config. 
-func GenerateClients(nodes int, config *Config) ([]*Client, error) { - clients := make([]*Client, nodes) - for i := range clients { - client, err := NewClient(config) - if err != nil { - return nil, err - } - - clients[i] = client - } - return clients, nil -} - -// GenerateLine returns a truth matrix as if all the nodes are in a straight linke -// with the given spacing between them. -func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rtt := time.Duration(j-i) * spacing - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional -// grid with the given spacing between them. -func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - n := int(math.Sqrt(float64(nodes))) - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - x1, y1 := float64(i%n), float64(i/n) - x2, y2 := float64(j%n), float64(j/n) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt := time.Duration(dist * float64(spacing)) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateSplit returns a truth matrix as if half the nodes are close together in -// one location and half the nodes are close together in another. The lan factor -// is used to separate the nodes locally and the wan factor represents the split -// between the two sides. 
-func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - split := nodes / 2 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rtt := lan - if (i <= split && j > split) || (i > split && j <= split) { - rtt += wan - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed -// around a circle with the given radius. The first node is at the "center" of the -// circle because it's equidistant from all the other nodes, but we place it at -// double the radius, so it should show up above all the other nodes in height. -func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - var rtt time.Duration - if i == 0 { - rtt = 2 * radius - } else { - t1 := 2.0 * math.Pi * float64(i) / float64(nodes) - x1, y1 := math.Cos(t1), math.Sin(t1) - t2 := 2.0 * math.Pi * float64(j) / float64(nodes) - x2, y2 := math.Cos(t2), math.Sin(t2) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt = time.Duration(dist * float64(radius)) - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateRandom returns a truth matrix for a set of nodes with normally -// distributed delays, with the given mean and deviation. The RNG is re-seeded -// so you always get the same matrix for a given size. 
-func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { - rand.Seed(1) - - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() - rtt := time.Duration(rttSeconds * secondsToNanoseconds) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// Simulate runs the given number of cycles using the given list of clients and -// truth matrix. On each cycle, each client will pick a random node and observe -// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for -// each simulation run to get deterministic results (for this algorithm and the -// underlying algorithm which will use random numbers for position vectors when -// starting out with everything at the origin). -func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { - rand.Seed(1) - - nodes := len(clients) - for cycle := 0; cycle < cycles; cycle++ { - for i := range clients { - if j := rand.Intn(nodes); j != i { - c := clients[j].GetCoordinate() - rtt := truth[i][j] - node := fmt.Sprintf("node_%d", j) - clients[i].Update(node, c, rtt) - } - } - } -} - -// Stats is returned from the Evaluate function with a summary of the algorithm -// performance. -type Stats struct { - ErrorMax float64 - ErrorAvg float64 -} - -// Evaluate uses the coordinates of the given clients to calculate estimated -// distances and compares them with the given truth matrix, returning summary -// stats. 
-func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { - nodes := len(clients) - count := 0 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() - actual := truth[i][j].Seconds() - error := math.Abs(est-actual) / actual - stats.ErrorMax = math.Max(stats.ErrorMax, error) - stats.ErrorAvg += error - count += 1 - } - } - - stats.ErrorAvg /= float64(count) - fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) - return -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51e0..00000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b35..00000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537ead..00000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. 
-func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. - homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . 
-read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. - if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE deleted file mode 100644 index a3866a291fd..00000000000 --- a/vendor/github.com/mitchellh/hashstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of 
this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md deleted file mode 100644 index feb0c24962c..00000000000 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) - -hashstructure is a Go library for creating a unique hash value -for arbitrary values in Go. - -This can be used to key values in a hash (for use in a map, set, etc.) -that are complex. The most common use case is comparing two values without -sending data across the network, caching values locally (de-dup), and so on. - -## Features - - * Hash any arbitrary Go value, including complex types. - - * Tag a struct field to ignore it and not affect the hash value. 
- - * Tag a slice type struct field to treat it as a set where ordering - doesn't affect the hash code but the field itself is still taken into - account to create the hash value. - - * Optionally, specify a custom hash function to optimize for speed, collision - avoidance for your data set, etc. - - * Optionally, hash the output of `.String()` on structs that implement fmt.Stringer, - allowing effective hashing of time.Time - - * Optionally, override the hashing process by implementing `Hashable`. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/hashstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). - -A quick code example is shown below: - -```go -type ComplexStruct struct { - Name string - Age uint - Metadata map[string]interface{} -} - -v := ComplexStruct{ - Name: "mitchellh", - Age: 64, - Metadata: map[string]interface{}{ - "car": true, - "location": "California", - "siblings": []string{"Bob", "John"}, - }, -} - -hash, err := hashstructure.Hash(v, nil) -if err != nil { - panic(err) -} - -fmt.Printf("%d", hash) -// Output: -// 2307517237273902113 -``` diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go deleted file mode 100644 index 89dd4d3ea89..00000000000 --- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ /dev/null @@ -1,422 +0,0 @@ -package hashstructure - -import ( - "encoding/binary" - "fmt" - "hash" - "hash/fnv" - "reflect" - "time" -) - -// ErrNotStringer is returned when there's an error with hash:"string" -type ErrNotStringer struct { - Field string -} - -// Error implements error for ErrNotStringer -func (ens *ErrNotStringer) Error() string { - return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) -} - -// HashOptions are options that are available for hashing. 
-type HashOptions struct { - // Hasher is the hash function to use. If this isn't set, it will - // default to FNV. - Hasher hash.Hash64 - - // TagName is the struct tag to look at when hashing the structure. - // By default this is "hash". - TagName string - - // ZeroNil is flag determining if nil pointer should be treated equal - // to a zero value of pointed type. By default this is false. - ZeroNil bool - - // IgnoreZeroValue is determining if zero value fields should be - // ignored for hash calculation. - IgnoreZeroValue bool - - // SlicesAsSets assumes that a `set` tag is always present for slices. - // Default is false (in which case the tag is used instead) - SlicesAsSets bool - - // UseStringer will attempt to use fmt.Stringer aways. If the struct - // doesn't implement fmt.Stringer, it'll fall back to trying usual tricks. - // If this is true, and the "string" tag is also set, the tag takes - // precedense (meaning that if the type doesn't implement fmt.Stringer, we - // panic) - UseStringer bool -} - -// Hash returns the hash value of an arbitrary value. -// -// If opts is nil, then default options will be used. See HashOptions -// for the default values. The same *HashOptions value cannot be used -// concurrently. None of the values within a *HashOptions struct are -// safe to read/write while hashing is being done. -// -// Notes on the value: -// -// * Unexported fields on structs are ignored and do not affect the -// hash value. -// -// * Adding an exported field to a struct with the zero value will change -// the hash value. -// -// For structs, the hashing can be controlled using tags. For example: -// -// struct { -// Name string -// UUID string `hash:"ignore"` -// } -// -// The available tag values are: -// -// * "ignore" or "-" - The field will be ignored and not affect the hash code. -// -// * "set" - The field will be treated as a set, where ordering doesn't -// affect the hash code. This only works for slices. 
-// -// * "string" - The field will be hashed as a string, only works when the -// field implements fmt.Stringer -// -func Hash(v interface{}, opts *HashOptions) (uint64, error) { - // Create default options - if opts == nil { - opts = &HashOptions{} - } - if opts.Hasher == nil { - opts.Hasher = fnv.New64() - } - if opts.TagName == "" { - opts.TagName = "hash" - } - - // Reset the hash - opts.Hasher.Reset() - - // Create our walker and walk the structure - w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, - ignorezerovalue: opts.IgnoreZeroValue, - sets: opts.SlicesAsSets, - stringer: opts.UseStringer, - } - return w.visit(reflect.ValueOf(v), nil) -} - -type walker struct { - h hash.Hash64 - tag string - zeronil bool - ignorezerovalue bool - sets bool - stringer bool -} - -type visitOpts struct { - // Flags are a bitmask of flags to affect behavior of this visit - Flags visitFlag - - // Information about the struct containing this field - Struct interface{} - StructField string -} - -var timeType = reflect.TypeOf(time.Time{}) - -func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { - t := reflect.TypeOf(0) - - // Loop since these can be wrapped in multiple layers of pointers - // and interfaces. - for { - // If we have an interface, dereference it. We have to do this up - // here because it might be a nil in there and the check below must - // catch that. - if v.Kind() == reflect.Interface { - v = v.Elem() - continue - } - - if v.Kind() == reflect.Ptr { - if w.zeronil { - t = v.Type().Elem() - } - v = reflect.Indirect(v) - continue - } - - break - } - - // If it is nil, treat it like a zero. - if !v.IsValid() { - v = reflect.Zero(t) - } - - // Binary writing can use raw ints, we have to convert to - // a sized-int, we'll choose the largest... 
- switch v.Kind() { - case reflect.Int: - v = reflect.ValueOf(int64(v.Int())) - case reflect.Uint: - v = reflect.ValueOf(uint64(v.Uint())) - case reflect.Bool: - var tmp int8 - if v.Bool() { - tmp = 1 - } - v = reflect.ValueOf(tmp) - } - - k := v.Kind() - - // We can shortcut numeric values by directly binary writing them - if k >= reflect.Int && k <= reflect.Complex64 { - // A direct hash calculation - w.h.Reset() - err := binary.Write(w.h, binary.LittleEndian, v.Interface()) - return w.h.Sum64(), err - } - - switch v.Type() { - case timeType: - w.h.Reset() - b, err := v.Interface().(time.Time).MarshalBinary() - if err != nil { - return 0, err - } - - err = binary.Write(w.h, binary.LittleEndian, b) - return w.h.Sum64(), err - } - - switch k { - case reflect.Array: - var h uint64 - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - h = hashUpdateOrdered(w.h, h, current) - } - - return h, nil - - case reflect.Map: - var includeMap IncludableMap - if opts != nil && opts.Struct != nil { - if v, ok := opts.Struct.(IncludableMap); ok { - includeMap = v - } - } - - // Build the hash for the map. We do this by XOR-ing all the key - // and value hashes. This makes it deterministic despite ordering. 
- var h uint64 - for _, k := range v.MapKeys() { - v := v.MapIndex(k) - if includeMap != nil { - incl, err := includeMap.HashIncludeMap( - opts.StructField, k.Interface(), v.Interface()) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - kh, err := w.visit(k, nil) - if err != nil { - return 0, err - } - vh, err := w.visit(v, nil) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - - return h, nil - - case reflect.Struct: - parent := v.Interface() - var include Includable - if impl, ok := parent.(Includable); ok { - include = impl - } - - if impl, ok := parent.(Hashable); ok { - return impl.Hash() - } - - // If we can address this value, check if the pointer value - // implements our interfaces and use that if so. - if v.CanAddr() { - vptr := v.Addr() - parentptr := vptr.Interface() - if impl, ok := parentptr.(Includable); ok { - include = impl - } - - if impl, ok := parentptr.(Hashable); ok { - return impl.Hash() - } - } - - t := v.Type() - h, err := w.visit(reflect.ValueOf(t.Name()), nil) - if err != nil { - return 0, err - } - - l := v.NumField() - for i := 0; i < l; i++ { - if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - - var f visitFlag - fieldType := t.Field(i) - if fieldType.PkgPath != "" { - // Unexported - continue - } - - tag := fieldType.Tag.Get(w.tag) - if tag == "ignore" || tag == "-" { - // Ignore this field - continue - } - - if w.ignorezerovalue { - zeroVal := reflect.Zero(reflect.TypeOf(innerV.Interface())).Interface() - if innerV.Interface() == zeroVal { - continue - } - } - - // if string is set, use the string value - if tag == "string" || w.stringer { - if impl, ok := innerV.Interface().(fmt.Stringer); ok { - innerV = reflect.ValueOf(impl.String()) - } else if tag == "string" { - // We only show this error if the tag explicitly - // requests a stringer. 
- return 0, &ErrNotStringer{ - Field: v.Type().Field(i).Name, - } - } - } - - // Check if we implement includable and check it - if include != nil { - incl, err := include.HashInclude(fieldType.Name, innerV) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - switch tag { - case "set": - f |= visitFlagSet - } - - kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) - if err != nil { - return 0, err - } - - vh, err := w.visit(innerV, &visitOpts{ - Flags: f, - Struct: parent, - StructField: fieldType.Name, - }) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - } - - return h, nil - - case reflect.Slice: - // We have two behaviors here. If it isn't a set, then we just - // visit all the elements. If it is a set, then we do a deterministic - // hash code. - var h uint64 - var set bool - if opts != nil { - set = (opts.Flags & visitFlagSet) != 0 - } - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - if set || w.sets { - h = hashUpdateUnordered(h, current) - } else { - h = hashUpdateOrdered(w.h, h, current) - } - } - - return h, nil - - case reflect.String: - // Directly hash - w.h.Reset() - _, err := w.h.Write([]byte(v.String())) - return w.h.Sum64(), err - - default: - return 0, fmt.Errorf("unknown kind to hash: %s", k) - } - -} - -func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { - // For ordered updates, use a real hash function - h.Reset() - - // We just panic if the binary writes fail because we are writing - // an int64 which should never be fail-able. 
- e1 := binary.Write(h, binary.LittleEndian, a) - e2 := binary.Write(h, binary.LittleEndian, b) - if e1 != nil { - panic(e1) - } - if e2 != nil { - panic(e2) - } - - return h.Sum64() -} - -func hashUpdateUnordered(a, b uint64) uint64 { - return a ^ b -} - -// visitFlag is used as a bitmask for affecting visit behavior -type visitFlag uint - -const ( - visitFlagInvalid visitFlag = iota - visitFlagSet = iota << 1 -) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go deleted file mode 100644 index 702d35415d4..00000000000 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ /dev/null @@ -1,22 +0,0 @@ -package hashstructure - -// Includable is an interface that can optionally be implemented by -// a struct. It will be called for each field in the struct to check whether -// it should be included in the hash. -type Includable interface { - HashInclude(field string, v interface{}) (bool, error) -} - -// IncludableMap is an interface that can optionally be implemented by -// a struct. It will be called when a map-type field is found to ask the -// struct if the map item should be included in the hash. -type IncludableMap interface { - HashIncludeMap(field string, k, v interface{}) (bool, error) -} - -// Hashable is an interface that can optionally be implemented by a struct -// to override the hash value. This value will override the hash value for -// the entire struct. Entries in the struct will not be hashed. 
-type Hashable interface { - Hash() (uint64, error) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index c364b07f936..ef8dacda0b9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -81,9 +81,6 @@ github.com/alexedwards/argon2id # github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 ## explicit github.com/amoghe/go-crypt -# github.com/armon/go-metrics v0.4.1 -## explicit; go 1.12 -github.com/armon/go-metrics # github.com/armon/go-radix v1.0.0 ## explicit github.com/armon/go-radix @@ -940,27 +937,9 @@ github.com/go-micro/plugins/v4/events/natsjs # github.com/go-micro/plugins/v4/logger/zerolog v1.2.0 ## explicit; go 1.17 github.com/go-micro/plugins/v4/logger/zerolog -# github.com/go-micro/plugins/v4/registry/consul v1.2.1 -## explicit; go 1.17 -github.com/go-micro/plugins/v4/registry/consul -# github.com/go-micro/plugins/v4/registry/etcd v1.2.0 -## explicit; go 1.17 -github.com/go-micro/plugins/v4/registry/etcd -# github.com/go-micro/plugins/v4/registry/kubernetes v1.1.2 -## explicit; go 1.18 -github.com/go-micro/plugins/v4/registry/kubernetes -github.com/go-micro/plugins/v4/registry/kubernetes/client -github.com/go-micro/plugins/v4/registry/kubernetes/client/api -github.com/go-micro/plugins/v4/registry/kubernetes/client/watch -# github.com/go-micro/plugins/v4/registry/mdns v1.2.0 -## explicit; go 1.17 -github.com/go-micro/plugins/v4/registry/mdns # github.com/go-micro/plugins/v4/registry/memory v1.2.0 ## explicit; go 1.17 github.com/go-micro/plugins/v4/registry/memory -# github.com/go-micro/plugins/v4/registry/nats v1.2.2 -## explicit; go 1.17 -github.com/go-micro/plugins/v4/registry/nats # github.com/go-micro/plugins/v4/server/grpc v1.2.0 ## explicit; go 1.17 github.com/go-micro/plugins/v4/server/grpc @@ -1180,21 +1159,9 @@ github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options github.com/grpc-ecosystem/grpc-gateway/v2/runtime 
github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hashicorp/consul/api v1.25.1 -## explicit; go 1.19 -github.com/hashicorp/consul/api -github.com/hashicorp/consul/api/watch -# github.com/hashicorp/go-cleanhttp v0.5.2 -## explicit; go 1.13 -github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-hclog v1.6.3 ## explicit; go 1.13 github.com/hashicorp/go-hclog -# github.com/hashicorp/go-immutable-radix v1.3.1 -## explicit -github.com/hashicorp/go-immutable-radix -# github.com/hashicorp/go-msgpack v1.1.5 -## explicit; go 1.13 # github.com/hashicorp/go-plugin v1.6.1 ## explicit; go 1.17 github.com/hashicorp/go-plugin @@ -1202,15 +1169,6 @@ github.com/hashicorp/go-plugin/internal/cmdrunner github.com/hashicorp/go-plugin/internal/grpcmux github.com/hashicorp/go-plugin/internal/plugin github.com/hashicorp/go-plugin/runner -# github.com/hashicorp/go-rootcerts v1.0.2 -## explicit; go 1.12 -github.com/hashicorp/go-rootcerts -# github.com/hashicorp/golang-lru v0.6.0 -## explicit; go 1.12 -github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/serf v0.10.1 -## explicit; go 1.12 -github.com/hashicorp/serf/coordinate # github.com/hashicorp/yamux v0.1.1 ## explicit; go 1.15 github.com/hashicorp/yamux @@ -1393,15 +1351,9 @@ github.com/minio/sha256-simd # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure -# github.com/mitchellh/go-homedir v1.1.0 -## explicit -github.com/mitchellh/go-homedir # github.com/mitchellh/go-testing-interface v1.14.1 ## explicit; go 1.14 github.com/mitchellh/go-testing-interface -# github.com/mitchellh/hashstructure v1.1.0 -## explicit; go 1.14 -github.com/mitchellh/hashstructure # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure From 9f9a9111d9d25d4334f2f8d03ccd42890f3997a3 Mon Sep 17 00:00:00 2001 From: jkoberg Date: Tue, 15 Oct 2024 16:38:54 +0200 Subject: [PATCH 5/6] feat(docs): regenerate docs Signed-off-by: jkoberg --- 
docs/helpers/configenvextractor.go | 4 +- docs/helpers/env_vars.yaml | 297 ++++++++++++++--------------- docs/helpers/extended_vars.yaml | 20 +- 3 files changed, 166 insertions(+), 155 deletions(-) diff --git a/docs/helpers/configenvextractor.go b/docs/helpers/configenvextractor.go index a553aa72b46..b680186df34 100644 --- a/docs/helpers/configenvextractor.go +++ b/docs/helpers/configenvextractor.go @@ -71,9 +71,9 @@ func runIntermediateCode(intermediateCodePath string) { defaultDataPath := "/var/lib/ocis" os.Setenv("OCIS_BASE_DATA_PATH", defaultDataPath) os.Setenv("OCIS_CONFIG_DIR", defaultConfigPath) - out, err := exec.Command("go", "run", intermediateCodePath).Output() + out, err := exec.Command("go", "run", intermediateCodePath).CombinedOutput() if err != nil { - log.Fatal(err) + log.Fatal(string(out), err) } fmt.Println(string(out)) } diff --git a/docs/helpers/env_vars.yaml b/docs/helpers/env_vars.yaml index 00907a68d9d..3895173b347 100644 --- a/docs/helpers/env_vars.yaml +++ b/docs/helpers/env_vars.yaml @@ -167,9 +167,8 @@ ACTIVITYLOG_STORE: name: OCIS_PERSISTENT_STORE;ACTIVITYLOG_STORE defaultValue: nats-js-kv type: string - description: 'The type of the store. Supported values are: ''memory'', ''ocmem'', - ''etcd'', ''redis'', ''redis-sentinel'', ''nats-js'', ''noop''. See the text description - for details.' + description: 'The type of the store. Supported values are: ''memory'', ''nats-js-kv'', + ''redis-sentinel'', ''noop''. See the text description for details.' introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -208,9 +207,9 @@ ACTIVITYLOG_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. 
Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -3182,9 +3181,8 @@ EVENTHISTORY_STORE: name: OCIS_PERSISTENT_STORE;EVENTHISTORY_STORE defaultValue: nats-js-kv type: string - description: 'The type of the store. Supported values are: ''memory'', ''ocmem'', - ''etcd'', ''redis'', ''redis-sentinel'', ''nats-js'', ''noop''. See the text description - for details.' + description: 'The type of the store. Supported values are: ''memory'', ''nats-js-kv'', + ''redis-sentinel'', ''noop''. See the text description for details.' introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -3223,9 +3221,9 @@ EVENTHISTORY_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -3872,9 +3870,9 @@ FRONTEND_OCS_STAT_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. 
See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -4210,9 +4208,9 @@ GATEWAY_CREATE_HOME_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -4434,9 +4432,9 @@ GATEWAY_PROVIDER_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -4714,9 +4712,9 @@ GRAPH_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store are configured. 
Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -7483,12 +7481,12 @@ NOTIFICATIONS_SMTP_ENCRYPTION: defaultValue: none type: string description: Encryption method for the SMTP communication. Possible values are 'starttls', - 'ssl', 'ssltls', 'tls' and 'none'. + 'ssltls' and 'none'. introductionVersion: pre5.0 - deprecationVersion: 5.0.0 - removalVersion: '%%NEXT_PRODUCTION_VERSION%%' - deprecationInfo: The NOTIFICATIONS_SMTP_ENCRYPTION values 'ssl' and 'tls' are deprecated - and will be removed in the future. + deprecationVersion: "" + removalVersion: "" + deprecationInfo: 'The NOTIFICATIONS_SMTP_ENCRYPTION values ''ssl'' and ''tls'' are + deprecated and will be removed in the future. | ' NOTIFICATIONS_SMTP_HOST: name: NOTIFICATIONS_SMTP_HOST defaultValue: "" @@ -7927,7 +7925,7 @@ OCIS_ASSET_THEMES_PATH: removalVersion: "" deprecationInfo: "" OCIS_ASYNC_UPLOADS: - name: OCIS_ASYNC_UPLOADS + name: OCIS_ASYNC_UPLOADS;SEARCH_EVENTS_ASYNC_UPLOADS defaultValue: "true" type: bool description: Enable asynchronous file uploads. @@ -7936,28 +7934,28 @@ OCIS_ASYNC_UPLOADS: removalVersion: "" deprecationInfo: "" OCIS_CACHE_AUTH_PASSWORD: - name: OCIS_CACHE_AUTH_PASSWORD;GATEWAY_CREATE_HOME_CACHE_AUTH_PASSWORD + name: OCIS_CACHE_AUTH_PASSWORD;STORAGE_SYSTEM_CACHE_AUTH_PASSWORD defaultValue: "" type: string - description: The password to use for authentication. Only applies when store type - 'nats-js-kv' is configured. + description: Password for the configured store. Only applies when store type 'nats-js-kv' + is configured. 
introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_CACHE_AUTH_USERNAME: - name: OCIS_CACHE_AUTH_USERNAME;GATEWAY_CREATE_HOME_CACHE_AUTH_USERNAME + name: OCIS_CACHE_AUTH_USERNAME;STORAGE_SYSTEM_CACHE_AUTH_USERNAME defaultValue: "" type: string - description: The username to use for authentication. Only applies when store type - 'nats-js-kv' is configured. + description: Username for the configured store. Only applies when store type 'nats-js-kv' + is configured. introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_CACHE_DATABASE: name: OCIS_CACHE_DATABASE - defaultValue: cache-createhome + defaultValue: storage-system type: string description: The database name the configured store should use. introductionVersion: pre5.0 @@ -7965,11 +7963,11 @@ OCIS_CACHE_DATABASE: removalVersion: "" deprecationInfo: "" OCIS_CACHE_DISABLE_PERSISTENCE: - name: OCIS_CACHE_DISABLE_PERSISTENCE;GATEWAY_CREATE_HOME_CACHE_DISABLE_PERSISTENCE + name: OCIS_CACHE_DISABLE_PERSISTENCE;STORAGE_SYSTEM_CACHE_DISABLE_PERSISTENCE defaultValue: "false" type: bool - description: Disables persistence of the create home cache. Only applies when store - type 'nats-js-kv' is configured. Defaults to false. + description: Disables persistence of the cache. Only applies when store type 'nats-js-kv' + is configured. Defaults to false. introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" @@ -7986,7 +7984,7 @@ OCIS_CACHE_SIZE: removalVersion: "" deprecationInfo: "" OCIS_CACHE_STORE: - name: OCIS_CACHE_STORE;GATEWAY_CREATE_HOME_CACHE_STORE + name: OCIS_CACHE_STORE;STORAGE_SYSTEM_CACHE_STORE defaultValue: memory type: string description: 'The type of the cache store. 
Supported values are: ''memory'', ''redis-sentinel'', @@ -7996,30 +7994,30 @@ OCIS_CACHE_STORE: removalVersion: "" deprecationInfo: "" OCIS_CACHE_STORE_NODES: - name: OCIS_CACHE_STORE_NODES;GATEWAY_CREATE_HOME_CACHE_STORE_NODES + name: OCIS_CACHE_STORE_NODES;STORAGE_SYSTEM_CACHE_STORE_NODES defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_CACHE_TTL: - name: OCIS_CACHE_TTL;GATEWAY_CREATE_HOME_CACHE_TTL - defaultValue: 5m0s + name: OCIS_CACHE_TTL;STORAGE_SYSTEM_CACHE_TTL + defaultValue: 24m0s type: Duration - description: Default time to live for user info in the cache. Only applied when - access tokens has no expiration. See the Environment Variable Types description + description: Default time to live for user info in the user info cache. Only applied + when access tokens has no expiration. See the Environment Variable Types description for more details. 
introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_CORS_ALLOW_CREDENTIALS: - name: OCIS_CORS_ALLOW_CREDENTIALS;WEBDAV_CORS_ALLOW_CREDENTIALS + name: OCIS_CORS_ALLOW_CREDENTIALS;ACTIVITYLOG_CORS_ALLOW_CREDENTIALS defaultValue: "true" type: bool description: 'Allow credentials for CORS.See following chapter for more details: @@ -8029,9 +8027,9 @@ OCIS_CORS_ALLOW_CREDENTIALS: removalVersion: "" deprecationInfo: "" OCIS_CORS_ALLOW_HEADERS: - name: OCIS_CORS_ALLOW_HEADERS;WEBDAV_CORS_ALLOW_HEADERS + name: OCIS_CORS_ALLOW_HEADERS;ACTIVITYLOG_CORS_ALLOW_HEADERS defaultValue: '[Authorization Origin Content-Type Accept X-Requested-With X-Request-Id - Cache-Control]' + Ocs-Apirequest]' type: '[]string' description: 'A list of allowed CORS headers. See following chapter for more details: *Access-Control-Request-Headers* at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Headers. @@ -8041,8 +8039,8 @@ OCIS_CORS_ALLOW_HEADERS: removalVersion: "" deprecationInfo: "" OCIS_CORS_ALLOW_METHODS: - name: OCIS_CORS_ALLOW_METHODS;WEBDAV_CORS_ALLOW_METHODS - defaultValue: '[GET POST PUT PATCH DELETE OPTIONS]' + name: OCIS_CORS_ALLOW_METHODS;ACTIVITYLOG_CORS_ALLOW_METHODS + defaultValue: '[GET]' type: '[]string' description: 'A list of allowed CORS methods. See following chapter for more details: *Access-Control-Request-Method* at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Method. @@ -8052,7 +8050,7 @@ OCIS_CORS_ALLOW_METHODS: removalVersion: "" deprecationInfo: "" OCIS_CORS_ALLOW_ORIGINS: - name: OCIS_CORS_ALLOW_ORIGINS;WEBDAV_CORS_ALLOW_ORIGINS + name: OCIS_CORS_ALLOW_ORIGINS;ACTIVITYLOG_CORS_ALLOW_ORIGINS defaultValue: '[*]' type: '[]string' description: 'A list of allowed CORS origins. See following chapter for more details: @@ -8113,7 +8111,7 @@ OCIS_DEFAULT_LANGUAGE: type: string description: The default language used by services and the WebUI. 
If not defined, English will be used as default. See the documentation for more details. - introductionVersion: "5.0" + introductionVersion: '%%NEXT%%' deprecationVersion: "" removalVersion: "" deprecationInfo: "" @@ -8196,7 +8194,7 @@ OCIS_ENABLE_RESHARING: removalVersion: "" deprecationInfo: Resharing will be removed in the future. OCIS_EVENTS_AUTH_PASSWORD: - name: OCIS_EVENTS_AUTH_PASSWORD;SSE_EVENTS_AUTH_PASSWORD + name: OCIS_EVENTS_AUTH_PASSWORD defaultValue: "" type: string description: The password to authenticate with the events broker. The events broker @@ -8206,7 +8204,7 @@ OCIS_EVENTS_AUTH_PASSWORD: removalVersion: "" deprecationInfo: "" OCIS_EVENTS_AUTH_USERNAME: - name: OCIS_EVENTS_AUTH_USERNAME;SSE_EVENTS_AUTH_USERNAME + name: OCIS_EVENTS_AUTH_USERNAME defaultValue: "" type: string description: The username to authenticate with the events broker. The events broker @@ -8216,7 +8214,7 @@ OCIS_EVENTS_AUTH_USERNAME: removalVersion: "" deprecationInfo: "" OCIS_EVENTS_CLUSTER: - name: OCIS_EVENTS_CLUSTER;SSE_EVENTS_CLUSTER + name: OCIS_EVENTS_CLUSTER defaultValue: ocis-cluster type: string description: The clusterID of the event system. The event system is the message @@ -8227,7 +8225,7 @@ OCIS_EVENTS_CLUSTER: removalVersion: "" deprecationInfo: "" OCIS_EVENTS_ENABLE_TLS: - name: OCIS_EVENTS_ENABLE_TLS;SSE_EVENTS_ENABLE_TLS + name: OCIS_EVENTS_ENABLE_TLS defaultValue: "false" type: bool description: Enable TLS for the connection to the events broker. The events broker @@ -8237,7 +8235,7 @@ OCIS_EVENTS_ENABLE_TLS: removalVersion: "" deprecationInfo: "" OCIS_EVENTS_ENDPOINT: - name: OCIS_EVENTS_ENDPOINT;SSE_EVENTS_ENDPOINT + name: OCIS_EVENTS_ENDPOINT defaultValue: 127.0.0.1:9233 type: string description: The address of the event system. 
The event system is the message queuing @@ -8247,11 +8245,11 @@ OCIS_EVENTS_ENDPOINT: removalVersion: "" deprecationInfo: "" OCIS_EVENTS_TLS_ROOT_CA_CERTIFICATE: - name: OCIS_EVENTS_TLS_ROOT_CA_CERTIFICATE;SSE_EVENTS_TLS_ROOT_CA_CERTIFICATE + name: OCIS_EVENTS_TLS_ROOT_CA_CERTIFICATE defaultValue: "" type: string description: The root CA certificate used to validate the server's TLS certificate. - If provided SSE_EVENTS_TLS_INSECURE will be seen as false. + If provided NOTIFICATIONS_EVENTS_TLS_INSECURE will be seen as false. introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" @@ -8289,7 +8287,7 @@ OCIS_GRPC_CLIENT_TLS_MODE: removalVersion: "" deprecationInfo: "" OCIS_GRPC_PROTOCOL: - name: OCIS_GRPC_PROTOCOL;GROUPS_GRPC_PROTOCOL + name: OCIS_GRPC_PROTOCOL;APP_REGISTRY_GRPC_PROTOCOL defaultValue: "" type: string description: The transport protocol of the GRPC service. @@ -8329,7 +8327,7 @@ OCIS_HTTP_TLS_KEY: removalVersion: "" deprecationInfo: "" OCIS_INSECURE: - name: OCIS_INSECURE;SSE_EVENTS_TLS_INSECURE + name: OCIS_INSECURE defaultValue: "false" type: bool description: Whether to verify the server TLS certificates. @@ -8338,11 +8336,11 @@ OCIS_INSECURE: removalVersion: "" deprecationInfo: "" OCIS_JWT_SECRET: - name: OCIS_JWT_SECRET;SSE_JWT_SECRET + name: OCIS_JWT_SECRET;ACTIVITYLOG_JWT_SECRET defaultValue: "" type: string description: The secret to mint and validate jwt tokens. - introductionVersion: "5.0" + introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" deprecationInfo: "" @@ -8455,7 +8453,7 @@ OCIS_LDAP_DISABLED_USERS_GROUP_DN: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_BASE_DN: - name: OCIS_LDAP_GROUP_BASE_DN;GROUPS_LDAP_GROUP_BASE_DN + name: OCIS_LDAP_GROUP_BASE_DN;AUTH_BASIC_LDAP_GROUP_BASE_DN defaultValue: ou=groups,o=libregraph-idm type: string description: Search base DN for looking up LDAP groups. 
@@ -8464,7 +8462,7 @@ OCIS_LDAP_GROUP_BASE_DN: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_FILTER: - name: OCIS_LDAP_GROUP_FILTER;GROUPS_LDAP_GROUP_FILTER + name: OCIS_LDAP_GROUP_FILTER;AUTH_BASIC_LDAP_GROUP_FILTER defaultValue: "" type: string description: LDAP filter to add to the default filters for group searches. @@ -8473,7 +8471,7 @@ OCIS_LDAP_GROUP_FILTER: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_OBJECTCLASS: - name: OCIS_LDAP_GROUP_OBJECTCLASS;GROUPS_LDAP_GROUP_OBJECTCLASS + name: OCIS_LDAP_GROUP_OBJECTCLASS;AUTH_BASIC_LDAP_GROUP_OBJECTCLASS defaultValue: groupOfNames type: string description: The object class to use for groups in the default group search filter @@ -8483,7 +8481,7 @@ OCIS_LDAP_GROUP_OBJECTCLASS: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCHEMA_DISPLAYNAME: - name: OCIS_LDAP_GROUP_SCHEMA_DISPLAYNAME;GROUPS_LDAP_GROUP_SCHEMA_DISPLAYNAME + name: OCIS_LDAP_GROUP_SCHEMA_DISPLAYNAME;AUTH_BASIC_LDAP_GROUP_SCHEMA_DISPLAYNAME defaultValue: cn type: string description: LDAP Attribute to use for the displayname of groups (often the same @@ -8493,7 +8491,7 @@ OCIS_LDAP_GROUP_SCHEMA_DISPLAYNAME: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCHEMA_GROUPNAME: - name: OCIS_LDAP_GROUP_SCHEMA_GROUPNAME;GROUPS_LDAP_GROUP_SCHEMA_GROUPNAME + name: OCIS_LDAP_GROUP_SCHEMA_GROUPNAME;AUTH_BASIC_LDAP_GROUP_SCHEMA_GROUPNAME defaultValue: cn type: string description: LDAP Attribute to use for the name of groups. @@ -8502,28 +8500,28 @@ OCIS_LDAP_GROUP_SCHEMA_GROUPNAME: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCHEMA_ID: - name: OCIS_LDAP_GROUP_SCHEMA_ID;GROUPS_LDAP_GROUP_SCHEMA_ID + name: OCIS_LDAP_GROUP_SCHEMA_ID;AUTH_BASIC_LDAP_GROUP_SCHEMA_ID defaultValue: ownclouduuid type: string description: LDAP Attribute to use as the unique id for groups. This should be a - stable globally unique ID like a UUID. + stable globally unique id (e.g. a UUID). 
introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCHEMA_ID_IS_OCTETSTRING: - name: OCIS_LDAP_GROUP_SCHEMA_ID_IS_OCTETSTRING;GROUPS_LDAP_GROUP_SCHEMA_ID_IS_OCTETSTRING + name: OCIS_LDAP_GROUP_SCHEMA_ID_IS_OCTETSTRING;AUTH_BASIC_LDAP_GROUP_SCHEMA_ID_IS_OCTETSTRING defaultValue: "false" type: bool description: Set this to true if the defined 'id' attribute for groups is of the 'OCTETSTRING' syntax. This is e.g. required when using the 'objectGUID' attribute - of Active Directory for the group ID's. + of Active Directory for the group IDs. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCHEMA_MAIL: - name: OCIS_LDAP_GROUP_SCHEMA_MAIL;GROUPS_LDAP_GROUP_SCHEMA_MAIL + name: OCIS_LDAP_GROUP_SCHEMA_MAIL;AUTH_BASIC_LDAP_GROUP_SCHEMA_MAIL defaultValue: mail type: string description: LDAP Attribute to use for the email address of groups (can be empty). @@ -8532,7 +8530,7 @@ OCIS_LDAP_GROUP_SCHEMA_MAIL: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCHEMA_MEMBER: - name: OCIS_LDAP_GROUP_SCHEMA_MEMBER;GROUPS_LDAP_GROUP_SCHEMA_MEMBER + name: OCIS_LDAP_GROUP_SCHEMA_MEMBER;AUTH_BASIC_LDAP_GROUP_SCHEMA_MEMBER defaultValue: member type: string description: LDAP Attribute that is used for group members. @@ -8541,10 +8539,10 @@ OCIS_LDAP_GROUP_SCHEMA_MEMBER: removalVersion: "" deprecationInfo: "" OCIS_LDAP_GROUP_SCOPE: - name: OCIS_LDAP_GROUP_SCOPE;GROUPS_LDAP_GROUP_SCOPE + name: OCIS_LDAP_GROUP_SCOPE;AUTH_BASIC_LDAP_GROUP_SCOPE defaultValue: sub type: string - description: LDAP search scope to use when looking up groups. Supported scopes are + description: LDAP search scope to use when looking up groups. Supported values are 'base', 'one' and 'sub'. 
introductionVersion: pre5.0 deprecationVersion: "" @@ -8618,7 +8616,7 @@ OCIS_LDAP_USER_OBJECTCLASS: removalVersion: "" deprecationInfo: "" OCIS_LDAP_USER_SCHEMA_DISPLAYNAME: - name: OCIS_LDAP_USER_SCHEMA_DISPLAYNAME;GROUPS_LDAP_USER_SCHEMA_DISPLAYNAME + name: OCIS_LDAP_USER_SCHEMA_DISPLAYNAME;AUTH_BASIC_LDAP_USER_SCHEMA_DISPLAYNAME defaultValue: displayname type: string description: LDAP Attribute to use for the displayname of users. @@ -8636,12 +8634,12 @@ OCIS_LDAP_USER_SCHEMA_ID: removalVersion: "" deprecationInfo: "" OCIS_LDAP_USER_SCHEMA_ID_IS_OCTETSTRING: - name: OCIS_LDAP_USER_SCHEMA_ID_IS_OCTETSTRING;GROUPS_LDAP_USER_SCHEMA_ID_IS_OCTETSTRING + name: OCIS_LDAP_USER_SCHEMA_ID_IS_OCTETSTRING;AUTH_BASIC_LDAP_USER_SCHEMA_ID_IS_OCTETSTRING defaultValue: "false" type: bool description: Set this to true if the defined 'ID' attribute for users is of the 'OCTETSTRING' syntax. This is e.g. required when using the 'objectGUID' attribute - of Active Directory for the user ID's. + of Active Directory for the user IDs. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -8685,39 +8683,39 @@ OCIS_LDAP_USER_SCOPE: removalVersion: "" deprecationInfo: "" OCIS_LOG_COLOR: - name: OCIS_LOG_COLOR;WEBDAV_LOG_COLOR + name: OCIS_LOG_COLOR;ACTIVITYLOG_LOG_COLOR defaultValue: "false" type: bool description: Activates colorized log output. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_LOG_FILE: - name: OCIS_LOG_FILE;WEBDAV_LOG_FILE + name: OCIS_LOG_FILE;ACTIVITYLOG_LOG_FILE defaultValue: "" type: string description: The path to the log file. Activates logging to this file if set. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_LOG_LEVEL: - name: OCIS_LOG_LEVEL;WEBDAV_LOG_LEVEL + name: OCIS_LOG_LEVEL;ACTIVITYLOG_LOG_LEVEL defaultValue: "" type: string description: 'The log level. 
Valid values are: ''panic'', ''fatal'', ''error'', ''warn'', ''info'', ''debug'', ''trace''.' - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_LOG_PRETTY: - name: OCIS_LOG_PRETTY;WEBDAV_LOG_PRETTY + name: OCIS_LOG_PRETTY;ACTIVITYLOG_LOG_PRETTY defaultValue: "false" type: bool description: Activates pretty log output. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" @@ -8819,18 +8817,17 @@ OCIS_PASSWORD_POLICY_MIN_UPPERCASE_CHARACTERS: removalVersion: "" deprecationInfo: "" OCIS_PERSISTENT_STORE: - name: OCIS_PERSISTENT_STORE;EVENTHISTORY_STORE + name: OCIS_PERSISTENT_STORE;ACTIVITYLOG_STORE defaultValue: nats-js-kv type: string - description: 'The type of the store. Supported values are: ''memory'', ''ocmem'', - ''etcd'', ''redis'', ''redis-sentinel'', ''nats-js'', ''noop''. See the text description - for details.' + description: 'The type of the store. Supported values are: ''memory'', ''nats-js-kv'', + ''redis-sentinel'', ''noop''. See the text description for details.' introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_PERSISTENT_STORE_AUTH_PASSWORD: - name: OCIS_PERSISTENT_STORE_AUTH_PASSWORD;EVENTHISTORY_STORE_AUTH_PASSWORD + name: OCIS_PERSISTENT_STORE_AUTH_PASSWORD;ACTIVITYLOG_STORE_AUTH_PASSWORD defaultValue: "" type: string description: The password to authenticate with the store. Only applies when store @@ -8840,7 +8837,7 @@ OCIS_PERSISTENT_STORE_AUTH_PASSWORD: removalVersion: "" deprecationInfo: "" OCIS_PERSISTENT_STORE_AUTH_USERNAME: - name: OCIS_PERSISTENT_STORE_AUTH_USERNAME;EVENTHISTORY_STORE_AUTH_USERNAME + name: OCIS_PERSISTENT_STORE_AUTH_USERNAME;ACTIVITYLOG_STORE_AUTH_USERNAME defaultValue: "" type: string description: The username to authenticate with the store. 
Only applies when store @@ -8850,13 +8847,13 @@ OCIS_PERSISTENT_STORE_AUTH_USERNAME: removalVersion: "" deprecationInfo: "" OCIS_PERSISTENT_STORE_NODES: - name: OCIS_PERSISTENT_STORE_NODES;EVENTHISTORY_STORE_NODES + name: OCIS_PERSISTENT_STORE_NODES;ACTIVITYLOG_STORE_NODES defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -8873,11 +8870,11 @@ OCIS_PERSISTENT_STORE_SIZE: removalVersion: "" deprecationInfo: "" OCIS_PERSISTENT_STORE_TTL: - name: OCIS_PERSISTENT_STORE_TTL;EVENTHISTORY_STORE_TTL - defaultValue: 336h0m0s + name: OCIS_PERSISTENT_STORE_TTL;ACTIVITYLOG_STORE_TTL + defaultValue: 0s type: Duration - description: Time to live for events in the store. Defaults to '336h' (2 weeks). - See the Environment Variable Types description for more details. + description: Time to live for events in the store. See the Environment Variable + Types description for more details. 
introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -8896,7 +8893,7 @@ OCIS_REVA_GATEWAY: defaultValue: com.owncloud.api.gateway type: string description: CS3 gateway used to look up user metadata - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" @@ -8923,7 +8920,7 @@ OCIS_REVA_GATEWAY_TLS_MODE: removalVersion: "" deprecationInfo: "" OCIS_SERVICE_ACCOUNT_ID: - name: OCIS_SERVICE_ACCOUNT_ID;USERLOG_SERVICE_ACCOUNT_ID + name: OCIS_SERVICE_ACCOUNT_ID;ACTIVITYLOG_SERVICE_ACCOUNT_ID defaultValue: "" type: string description: The ID of the service account the service should use. See the 'auth-service' @@ -8933,7 +8930,7 @@ OCIS_SERVICE_ACCOUNT_ID: removalVersion: "" deprecationInfo: "" OCIS_SERVICE_ACCOUNT_SECRET: - name: OCIS_SERVICE_ACCOUNT_SECRET;USERLOG_SERVICE_ACCOUNT_SECRET + name: OCIS_SERVICE_ACCOUNT_SECRET;ACTIVITYLOG_SERVICE_ACCOUNT_SECRET defaultValue: "" type: string description: The service account secret. @@ -8991,10 +8988,10 @@ OCIS_SYSTEM_USER_API_KEY: removalVersion: "" deprecationInfo: "" OCIS_SYSTEM_USER_ID: - name: OCIS_SYSTEM_USER_ID;SETTINGS_SYSTEM_USER_ID + name: OCIS_SYSTEM_USER_ID defaultValue: "" type: string - description: ID of the oCIS STORAGE-SYSTEM system user. Admins need to set the ID + description: ID of the oCIS storage-system system user. Admins need to set the ID for the STORAGE-SYSTEM system user in this config option which is then used to reference the user. Any reasonable long string is possible, preferably this would be an UUIDv4 format. @@ -9003,7 +9000,7 @@ OCIS_SYSTEM_USER_ID: removalVersion: "" deprecationInfo: "" OCIS_SYSTEM_USER_IDP: - name: OCIS_SYSTEM_USER_IDP;SETTINGS_SYSTEM_USER_IDP + name: OCIS_SYSTEM_USER_IDP;SHARING_PUBLIC_CS3_SYSTEM_USER_IDP defaultValue: internal type: string description: IDP of the oCIS STORAGE-SYSTEM system user. 
@@ -9012,40 +9009,40 @@ OCIS_SYSTEM_USER_IDP: removalVersion: "" deprecationInfo: "" OCIS_TRACING_COLLECTOR: - name: OCIS_TRACING_COLLECTOR;WEBDAV_TRACING_COLLECTOR + name: OCIS_TRACING_COLLECTOR;ACTIVITYLOG_TRACING_COLLECTOR defaultValue: "" type: string description: The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_TRACING_ENABLED: - name: OCIS_TRACING_ENABLED;WEBDAV_TRACING_ENABLED + name: OCIS_TRACING_ENABLED;ACTIVITYLOG_TRACING_ENABLED defaultValue: "false" type: bool description: Activates tracing. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_TRACING_ENDPOINT: - name: OCIS_TRACING_ENDPOINT;WEBDAV_TRACING_ENDPOINT + name: OCIS_TRACING_ENDPOINT;ACTIVITYLOG_TRACING_ENDPOINT defaultValue: "" type: string description: The endpoint of the tracing agent. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_TRACING_TYPE: - name: OCIS_TRACING_TYPE;WEBDAV_TRACING_TYPE + name: OCIS_TRACING_TYPE;ACTIVITYLOG_TRACING_TYPE defaultValue: "" type: string description: The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now. - introductionVersion: pre5.0 + introductionVersion: "5.0" deprecationVersion: "" removalVersion: "" deprecationInfo: "" @@ -9059,21 +9056,21 @@ OCIS_TRANSFER_SECRET: removalVersion: "" deprecationInfo: "" OCIS_TRANSLATION_PATH: - name: OCIS_TRANSLATION_PATH;USERLOG_TRANSLATION_PATH + name: OCIS_TRANSLATION_PATH;ACTIVITYLOG_TRANSLATION_PATH defaultValue: "" type: string description: (optional) Set this to a path with custom translations to overwrite the builtin translations. 
Note that file and folder naming rules apply, see the documentation for more details. - introductionVersion: pre5.0 + introductionVersion: '%%NEXT%%' deprecationVersion: "" removalVersion: "" deprecationInfo: "" OCIS_URL: - name: OCIS_URL;OCIS_PUBLIC_URL - defaultValue: https://127.0.0.1:9200 + name: OCIS_URL;OCIS_OIDC_ISSUER;IDP_ISS + defaultValue: https://localhost:9200 type: string - description: URL, where oCIS is reachable for users. + description: The OIDC issuer URL to use. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -10239,9 +10236,8 @@ POSTPROCESSING_STORE: name: OCIS_PERSISTENT_STORE;POSTPROCESSING_STORE defaultValue: nats-js-kv type: string - description: 'The type of the store. Supported values are: ''memory'', ''ocmem'', - ''etcd'', ''redis'', ''redis-sentinel'', ''nats-js'', ''noop''. See the text description - for details.' + description: 'The type of the store. Supported values are: ''memory'', ''redis-sentinel'', + ''nats-js-kv'', ''noop''. See the text description for details.' introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -10280,9 +10276,9 @@ POSTPROCESSING_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -10790,9 +10786,9 @@ PROXY_OIDC_USERINFO_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. 
This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -11418,9 +11414,9 @@ SETTINGS_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -12960,9 +12956,9 @@ STORAGE_SYSTEM_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. 
introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -13517,9 +13513,9 @@ STORAGE_USERS_FILEMETADATA_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -13648,9 +13644,9 @@ STORAGE_USERS_ID_CACHE_STORE_NODES: defaultValue: '[127.0.0.1:9233]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -15095,9 +15091,8 @@ USERLOG_STORE: name: OCIS_PERSISTENT_STORE;USERLOG_STORE defaultValue: memory type: string - description: 'The type of the store. Supported values are: ''memory'', ''ocmem'', - ''etcd'', ''redis'', ''redis-sentinel'', ''nats-js'', ''noop''. See the text description - for details.' + description: 'The type of the store. Supported values are: ''memory'', ''nats-js-kv'', + ''redis-sentinel'', ''noop''. See the text description for details.' 
introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" @@ -15136,9 +15131,9 @@ USERLOG_STORE_NODES: defaultValue: '[]' type: '[]string' description: A list of nodes to access the configured store. This has no effect - when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes - are used is dependent on the library of the configured store. See the Environment - Variable Types description for more details. + when 'memory' store is configured. Note that the behaviour how nodes are used + is dependent on the library of the configured store. See the Environment Variable + Types description for more details. introductionVersion: pre5.0 deprecationVersion: "" removalVersion: "" diff --git a/docs/helpers/extended_vars.yaml b/docs/helpers/extended_vars.yaml index 46135c0f163..6545845236c 100644 --- a/docs/helpers/extended_vars.yaml +++ b/docs/helpers/extended_vars.yaml @@ -76,7 +76,7 @@ variables: do_ignore: true - rawname: _registryEnv path: ocis-pkg/registry/registry.go:114 - foundincode: true + foundincode: false name: MICRO_REGISTRY type: string default_value: nats-js-kv @@ -87,7 +87,7 @@ variables: do_ignore: false - rawname: _registryAddressEnv path: ocis-pkg/registry/registry.go:118 - foundincode: true + foundincode: false name: MICRO_REGISTRY_ADDRESS type: "" default_value: "" @@ -162,6 +162,22 @@ variables: default_value: "" description: "" do_ignore: true +- rawname: _registryAddressEnv + path: ocis-pkg/registry/registry.go:91 + foundincode: true + name: _registryAddressEnv + type: "" + default_value: "" + description: "" + do_ignore: false +- rawname: _registryEnv + path: ocis-pkg/registry/registry.go:87 + foundincode: true + name: _registryEnv + type: "" + default_value: "" + description: "" + do_ignore: false - rawname: parts[0] path: ocis-pkg/config/envdecode/envdecode.go:382 foundincode: true From 3321e8def1ba9bb13b7e7da2e730f0d773a6ab41 Mon Sep 17 00:00:00 2001 From: Martin Mattel Date: Wed, 16 Oct 2024 10:20:36 +0200 
Subject: [PATCH 6/6] fix extended envvars MICRO_REGISTRY --- docs/helpers/extended_vars.yaml | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/docs/helpers/extended_vars.yaml b/docs/helpers/extended_vars.yaml index 6545845236c..6737303489f 100644 --- a/docs/helpers/extended_vars.yaml +++ b/docs/helpers/extended_vars.yaml @@ -75,19 +75,17 @@ variables: description: "" do_ignore: true - rawname: _registryEnv - path: ocis-pkg/registry/registry.go:114 - foundincode: false + path: ocis-pkg/registry/registry.go:87 + foundincode: true name: MICRO_REGISTRY type: string default_value: nats-js-kv - description: 'The Go micro registry type to use. Supported types are: ''memory'', - ''nats-js-kv'' (default) and ''kubernetes''. Note that ''nats'', ''etcd'', ''consul'' - and ''mdns'' are deprecated and will be removed in a later version. Only change - on supervision of ownCloud Support.' + description: 'The Go micro registry type to use. Supported types are: ''memory'' + and ''nats-js-kv'' (default). Only change on supervision of ownCloud Support.' do_ignore: false - rawname: _registryAddressEnv - path: ocis-pkg/registry/registry.go:118 - foundincode: false + path: ocis-pkg/registry/registry.go:91 + foundincode: true name: MICRO_REGISTRY_ADDRESS type: "" default_value: "" @@ -162,22 +160,6 @@ variables: default_value: "" description: "" do_ignore: true -- rawname: _registryAddressEnv - path: ocis-pkg/registry/registry.go:91 - foundincode: true - name: _registryAddressEnv - type: "" - default_value: "" - description: "" - do_ignore: false -- rawname: _registryEnv - path: ocis-pkg/registry/registry.go:87 - foundincode: true - name: _registryEnv - type: "" - default_value: "" - description: "" - do_ignore: false - rawname: parts[0] path: ocis-pkg/config/envdecode/envdecode.go:382 foundincode: true