From 76b27f162032649ddb3cb3f06ed24c7333b3fa66 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Sat, 4 May 2024 00:22:17 -0700 Subject: [PATCH] feat(all): auto-regenerate discovery clients (#2567) --- alloydb/v1/alloydb-api.json | 23 ++- alloydb/v1/alloydb-gen.go | 30 +++ alloydb/v1alpha/alloydb-api.json | 5 +- alloydb/v1alpha/alloydb-gen.go | 1 + api-list.json | 15 ++ bigquery/v2/bigquery-api.json | 4 +- bigquery/v2/bigquery-gen.go | 5 +- clouddeploy/v1/clouddeploy-api.json | 6 +- clouddeploy/v1/clouddeploy-gen.go | 6 +- dataform/v1beta1/dataform-api.json | 50 ++++- dataform/v1beta1/dataform-gen.go | 88 +++++++-- dialogflow/v3/dialogflow-api.json | 8 +- dialogflow/v3/dialogflow-gen.go | 1 + dialogflow/v3beta1/dialogflow-api.json | 8 +- dialogflow/v3beta1/dialogflow-gen.go | 1 + documentai/v1beta3/documentai-api.json | 9 +- documentai/v1beta3/documentai-gen.go | 3 + monitoring/v1/monitoring-api.json | 6 +- monitoring/v1/monitoring-gen.go | 10 +- monitoring/v3/monitoring-api.json | 4 +- monitoring/v3/monitoring-gen.go | 9 +- .../v1/networkmanagement-api.json | 6 +- networkmanagement/v1/networkmanagement-gen.go | 5 + .../v1beta1/networkmanagement-api.json | 6 +- .../v1beta1/networkmanagement-gen.go | 5 + privateca/v1/privateca-api.json | 12 +- privateca/v1/privateca-gen.go | 4 + .../v1/serviceconsumermanagement-api.json | 5 +- .../v1/serviceconsumermanagement-gen.go | 7 +- .../serviceconsumermanagement-api.json | 5 +- .../v1beta1/serviceconsumermanagement-gen.go | 7 +- spanner/v1/spanner-api.json | 39 +++- spanner/v1/spanner-gen.go | 182 ++++++++++++------ 33 files changed, 457 insertions(+), 118 deletions(-) diff --git a/alloydb/v1/alloydb-api.json b/alloydb/v1/alloydb-api.json index ec424d1b264..28166a1007a 100644 --- a/alloydb/v1/alloydb-api.json +++ b/alloydb/v1/alloydb-api.json @@ -1461,7 +1461,7 @@ } } }, - "revision": "20240417", + "revision": "20240424", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -1856,6 +1856,11 @@ "description": "Labels as key value pairs", "type": "object" }, + "maintenanceSchedule": { + "$ref": "MaintenanceSchedule", + "description": "Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set.", + "readOnly": true + }, "maintenanceUpdatePolicy": { "$ref": "MaintenanceUpdatePolicy", "description": "Optional. The maintenance update policy determines when to allow or deny updates." @@ -2596,6 +2601,19 @@ }, "type": "object" }, + "MaintenanceSchedule": { + "description": "MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead.", + "id": "MaintenanceSchedule", + "properties": { + "startTime": { + "description": "Output only. 
The scheduled start time for the maintenance.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "MaintenanceUpdatePolicy": { "description": "MaintenanceUpdatePolicy defines the policy for system updates.", "id": "MaintenanceUpdatePolicy", @@ -4209,6 +4227,7 @@ "PRODUCT_TYPE_ON_PREM", "ON_PREM", "PRODUCT_TYPE_MEMORYSTORE", + "PRODUCT_TYPE_BIGTABLE", "PRODUCT_TYPE_OTHER" ], "enumDeprecated": [ @@ -4221,6 +4240,7 @@ false, true, false, + false, false ], "enumDescriptions": [ @@ -4233,6 +4253,7 @@ "On premises database product.", "On premises database product.", "Memorystore product area in GCP", + "Bigtable product area in GCP", "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum." ], "type": "string" diff --git a/alloydb/v1/alloydb-gen.go b/alloydb/v1/alloydb-gen.go index 70c0d1f450b..b4fe25f85a1 100644 --- a/alloydb/v1/alloydb-gen.go +++ b/alloydb/v1/alloydb-gen.go @@ -588,6 +588,9 @@ type Cluster struct { InitialUser *UserPassword `json:"initialUser,omitempty"` // Labels: Labels as key value pairs Labels map[string]string `json:"labels,omitempty"` + // MaintenanceSchedule: Output only. The maintenance schedule for the cluster, + // generated for a specific rollout if a maintenance window is set. + MaintenanceSchedule *MaintenanceSchedule `json:"maintenanceSchedule,omitempty"` // MaintenanceUpdatePolicy: Optional. The maintenance update policy determines // when to allow or deny updates. MaintenanceUpdatePolicy *MaintenanceUpdatePolicy `json:"maintenanceUpdatePolicy,omitempty"` @@ -1467,6 +1470,32 @@ func (s *MachineConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) } +// MaintenanceSchedule: MaintenanceSchedule stores the maintenance schedule +// generated from the MaintenanceUpdatePolicy, once a maintenance rollout is +// triggered, if MaintenanceWindow is set, and if there is no conflicting +// DenyPeriod. The schedule is cleared once the update takes place. This field +// cannot be manually changed; modify the MaintenanceUpdatePolicy instead. +type MaintenanceSchedule struct { + // StartTime: Output only. The scheduled start time for the maintenance. + StartTime string `json:"startTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "StartTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "StartTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s *MaintenanceSchedule) MarshalJSON() ([]byte, error) { + type NoMethod MaintenanceSchedule + return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +} + // MaintenanceUpdatePolicy: MaintenanceUpdatePolicy defines the policy for // system updates. type MaintenanceUpdatePolicy struct { @@ -3155,6 +3184,7 @@ type StorageDatabasecenterProtoCommonProduct struct { // "PRODUCT_TYPE_ON_PREM" - On premises database product. // "ON_PREM" - On premises database product. 
// "PRODUCT_TYPE_MEMORYSTORE" - Memorystore product area in GCP + // "PRODUCT_TYPE_BIGTABLE" - Bigtable product area in GCP // "PRODUCT_TYPE_OTHER" - Other refers to rest of other product type. This is // to be when product type is known, but it is not present in this enum. Type string `json:"type,omitempty"` diff --git a/alloydb/v1alpha/alloydb-api.json b/alloydb/v1alpha/alloydb-api.json index 0beef60ece5..5e7a3f93855 100644 --- a/alloydb/v1alpha/alloydb-api.json +++ b/alloydb/v1alpha/alloydb-api.json @@ -1461,7 +1461,7 @@ } } }, - "revision": "20240417", + "revision": "20240424", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -4338,6 +4338,7 @@ "PRODUCT_TYPE_ON_PREM", "ON_PREM", "PRODUCT_TYPE_MEMORYSTORE", + "PRODUCT_TYPE_BIGTABLE", "PRODUCT_TYPE_OTHER" ], "enumDeprecated": [ @@ -4350,6 +4351,7 @@ false, true, false, + false, false ], "enumDescriptions": [ @@ -4362,6 +4364,7 @@ "On premises database product.", "On premises database product.", "Memorystore product area in GCP", + "Bigtable product area in GCP", "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum." ], "type": "string" diff --git a/alloydb/v1alpha/alloydb-gen.go b/alloydb/v1alpha/alloydb-gen.go index e9a5eaf52b1..e7a7956e11b 100644 --- a/alloydb/v1alpha/alloydb-gen.go +++ b/alloydb/v1alpha/alloydb-gen.go @@ -3309,6 +3309,7 @@ type StorageDatabasecenterProtoCommonProduct struct { // "PRODUCT_TYPE_ON_PREM" - On premises database product. // "ON_PREM" - On premises database product. // "PRODUCT_TYPE_MEMORYSTORE" - Memorystore product area in GCP + // "PRODUCT_TYPE_BIGTABLE" - Bigtable product area in GCP // "PRODUCT_TYPE_OTHER" - Other refers to rest of other product type. This is // to be when product type is known, but it is not present in this enum. Type string `json:"type,omitempty"` diff --git a/api-list.json b/api-list.json index 642fb119488..e7e1d47e3ab 100644 --- a/api-list.json +++ b/api-list.json @@ -2901,6 +2901,21 @@ "documentationLink": "https://cloud.google.com/essentialcontacts/docs/", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "eventarc:v1beta1", + "name": "eventarc", + "version": "v1beta1", + "title": "Eventarc API", + "description": "Build event-driven applications on Google Cloud Platform.", + "discoveryRestUrl": "https://eventarc.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/eventarc", + "preferred": false + }, { "kind": "discovery#directoryItem", "id": "eventarc:v1", diff --git a/bigquery/v2/bigquery-api.json b/bigquery/v2/bigquery-api.json index 1c534980633..fe8d8c931a7 100644 --- a/bigquery/v2/bigquery-api.json +++ b/bigquery/v2/bigquery-api.json @@ -1935,7 +1935,7 @@ } } }, - "revision": "20240418", + "revision": "20240423", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -9006,7 +9006,7 @@ "readOnly": true }, "replicationIntervalMs": { - "description": "Required. Specifies the interval at which the source table is polled for updates.", + "description": "Optional. Specifies the interval at which the source table is polled for updates. It's Optional. 
If not specified, default replication interval would be applied.", "format": "int64", "type": "string" }, diff --git a/bigquery/v2/bigquery-gen.go b/bigquery/v2/bigquery-gen.go index 81ec097267d..fc477a6ac86 100644 --- a/bigquery/v2/bigquery-gen.go +++ b/bigquery/v2/bigquery-gen.go @@ -9231,8 +9231,9 @@ type TableReplicationInfo struct { // ReplicationError: Optional. Output only. Replication error that will // permanently stopped table replication. ReplicationError *ErrorProto `json:"replicationError,omitempty"` - // ReplicationIntervalMs: Required. Specifies the interval at which the source - // table is polled for updates. + // ReplicationIntervalMs: Optional. Specifies the interval at which the source + // table is polled for updates. It's Optional. If not specified, default + // replication interval would be applied. ReplicationIntervalMs int64 `json:"replicationIntervalMs,omitempty,string"` // ReplicationStatus: Optional. Output only. Replication status of configured // replication. diff --git a/clouddeploy/v1/clouddeploy-api.json b/clouddeploy/v1/clouddeploy-api.json index 6a9fa890674..ae7c5a9b2d2 100644 --- a/clouddeploy/v1/clouddeploy-api.json +++ b/clouddeploy/v1/clouddeploy-api.json @@ -2065,7 +2065,7 @@ } } }, - "revision": "20240417", + "revision": "20240428", "rootUrl": "https://clouddeploy.googleapis.com/", "schemas": { "AbandonReleaseRequest": { @@ -2181,7 +2181,7 @@ "id": "AnthosCluster", "properties": { "membership": { - "description": "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + "description": "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", "type": "string" } }, @@ -3451,7 +3451,7 @@ "id": "GkeCluster", "properties": { "cluster": { - "description": "Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", + "description": "Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", "type": "string" }, "internalIp": { diff --git a/clouddeploy/v1/clouddeploy-gen.go b/clouddeploy/v1/clouddeploy-gen.go index 958fe5378cf..c93b864bb93 100644 --- a/clouddeploy/v1/clouddeploy-gen.go +++ b/clouddeploy/v1/clouddeploy-gen.go @@ -420,8 +420,8 @@ func (s *AdvanceRolloutRule) MarshalJSON() ([]byte, error) { // AnthosCluster: Information specifying an Anthos Cluster. type AnthosCluster struct { - // Membership: Membership of the GKE Hub-registered cluster to which to apply - // the Skaffold configuration. Format is + // Membership: Optional. Membership of the GKE Hub-registered cluster to which + // to apply the Skaffold configuration. Format is // `projects/{project}/locations/{location}/memberships/{membership_name}`. Membership string `json:"membership,omitempty"` // ForceSendFields is a list of field names (e.g. "Membership") to @@ -2012,7 +2012,7 @@ func (s *GatewayServiceMesh) MarshalJSON() ([]byte, error) { // GkeCluster: Information specifying a GKE Cluster. type GkeCluster struct { - // Cluster: Information specifying a GKE Cluster. Format is + // Cluster: Optional. Information specifying a GKE Cluster. Format is // `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. Cluster string `json:"cluster,omitempty"` // InternalIp: Optional. 
If true, `cluster` is accessed using the private IP diff --git a/dataform/v1beta1/dataform-api.json b/dataform/v1beta1/dataform-api.json index 1c939b2feee..39eb56cafd1 100644 --- a/dataform/v1beta1/dataform-api.json +++ b/dataform/v1beta1/dataform-api.json @@ -291,7 +291,7 @@ "$ref": "CommitRepositoryChangesRequest" }, "response": { - "$ref": "Empty" + "$ref": "CommitRepositoryChangesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -2108,7 +2108,7 @@ } } }, - "revision": "20240420", + "revision": "20240427", "rootUrl": "https://dataform.googleapis.com/", "schemas": { "Assertion": { @@ -2339,6 +2339,17 @@ }, "type": "object" }, + "CommitRepositoryChangesResponse": { + "description": "`CommitRepositoryChanges` response message.", + "id": "CommitRepositoryChangesResponse", + "properties": { + "commitSha": { + "description": "The commit SHA of the current commit.", + "type": "string" + } + }, + "type": "object" + }, "CommitWorkspaceChangesRequest": { "description": "`CommitWorkspaceChanges` request message.", "id": "CommitWorkspaceChangesRequest", @@ -2404,6 +2415,11 @@ "readOnly": true, "type": "array" }, + "dataEncryptionState": { + "$ref": "DataEncryptionState", + "description": "Output only. Only set if the repository has a KMS Key.", + "readOnly": true + }, "dataformCoreVersion": { "description": "Output only. The version of `@dataform/core` that was used for compilation.", "readOnly": true, @@ -2496,6 +2512,17 @@ }, "type": "object" }, + "DataEncryptionState": { + "description": "Describes encryption state of a resource.", + "id": "DataEncryptionState", + "properties": { + "kmsKeyVersionName": { + "description": "The KMS key version name with which data of a resource is encrypted.", + "type": "string" + } + }, + "type": "object" + }, "Declaration": { "description": "Represents a relation which is not managed by Dataform but which may be referenced by Dataform actions.", "id": "Declaration", @@ -3557,6 +3584,11 @@ "readOnly": true, "type": "string" }, + "dataEncryptionState": { + "$ref": "DataEncryptionState", + "description": "Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key.", + "readOnly": true + }, "displayName": { "description": "Optional. The repository's user-friendly name.", "type": "string" @@ -3565,6 +3597,10 @@ "$ref": "GitRemoteSettings", "description": "Optional. If set, configures this repository to be linked to a Git remote." }, + "kmsKeyName": { + "description": "Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" @@ -3858,6 +3894,11 @@ "description": "Immutable. The name of the compilation result to use for this invocation. Must be in the format `projects/*/locations/*/repositories/*/compilationResults/*`.", "type": "string" }, + "dataEncryptionState": { + "$ref": "DataEncryptionState", + "description": "Output only. Only set if the repository has a KMS Key.", + "readOnly": true + }, "invocationConfig": { "$ref": "InvocationConfig", "description": "Immutable. If left unset, a default InvocationConfig will be used." 
@@ -3969,6 +4010,11 @@ "description": "Represents a Dataform Git workspace.", "id": "Workspace", "properties": { + "dataEncryptionState": { + "$ref": "DataEncryptionState", + "description": "Output only. A data encryption state of a Git repository if this Workspace is protected by a KMS key.", + "readOnly": true + }, "name": { "description": "Identifier. The workspace's name.", "type": "string" diff --git a/dataform/v1beta1/dataform-gen.go b/dataform/v1beta1/dataform-gen.go index 4bedb7007c5..2c732dda95b 100644 --- a/dataform/v1beta1/dataform-gen.go +++ b/dataform/v1beta1/dataform-gen.go @@ -604,6 +604,31 @@ func (s *CommitRepositoryChangesRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) } +// CommitRepositoryChangesResponse: `CommitRepositoryChanges` response message. +type CommitRepositoryChangesResponse struct { + // CommitSha: The commit SHA of the current commit. + CommitSha string `json:"commitSha,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CommitSha") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CommitSha") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s *CommitRepositoryChangesResponse) MarshalJSON() ([]byte, error) { + type NoMethod CommitRepositoryChangesResponse + return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +} + // CommitWorkspaceChangesRequest: `CommitWorkspaceChanges` request message. type CommitWorkspaceChangesRequest struct { // Author: Required. The commit's author. @@ -671,6 +696,8 @@ type CompilationResult struct { // CompilationErrors: Output only. Errors encountered during project // compilation. CompilationErrors []*CompilationError `json:"compilationErrors,omitempty"` + // DataEncryptionState: Output only. Only set if the repository has a KMS Key. + DataEncryptionState *DataEncryptionState `json:"dataEncryptionState,omitempty"` // DataformCoreVersion: Output only. The version of `@dataform/core` that was // used for compilation. DataformCoreVersion string `json:"dataformCoreVersion,omitempty"` @@ -785,6 +812,29 @@ func (s *ComputeRepositoryAccessTokenStatusResponse) MarshalJSON() ([]byte, erro return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) } +// DataEncryptionState: Describes encryption state of a resource. +type DataEncryptionState struct { + // KmsKeyVersionName: The KMS key version name with which data of a resource is + // encrypted. + KmsKeyVersionName string `json:"kmsKeyVersionName,omitempty"` + // ForceSendFields is a list of field names (e.g. "KmsKeyVersionName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"KmsKeyVersionName") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s *DataEncryptionState) MarshalJSON() ([]byte, error) { + type NoMethod DataEncryptionState + return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) +} + // Declaration: Represents a relation which is not managed by Dataform but // which may be referenced by Dataform actions. type Declaration struct { @@ -2255,11 +2305,21 @@ func (s *RemoveFileRequest) MarshalJSON() ([]byte, error) { type Repository struct { // CreateTime: Output only. The timestamp of when the repository was created. CreateTime string `json:"createTime,omitempty"` + // DataEncryptionState: Output only. A data encryption state of a Git + // repository if this Repository is protected by a KMS key. + DataEncryptionState *DataEncryptionState `json:"dataEncryptionState,omitempty"` // DisplayName: Optional. The repository's user-friendly name. DisplayName string `json:"displayName,omitempty"` // GitRemoteSettings: Optional. If set, configures this repository to be linked // to a Git remote. GitRemoteSettings *GitRemoteSettings `json:"gitRemoteSettings,omitempty"` + // KmsKeyName: Optional. The reference to a KMS encryption key. If provided, it + // will be used to encrypt user data in the repository and all child resources. + // It is not possible to add or update the encryption key after the repository + // is created. Example: + // `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKey + // s/[key]` + KmsKeyName string `json:"kmsKeyName,omitempty"` // Labels: Optional. Repository user labels. Labels map[string]string `json:"labels,omitempty"` // Name: Identifier. The repository's name. @@ -2694,6 +2754,8 @@ type WorkflowInvocation struct { // this invocation. Must be in the format // `projects/*/locations/*/repositories/*/compilationResults/*`. CompilationResult string `json:"compilationResult,omitempty"` + // DataEncryptionState: Output only. Only set if the repository has a KMS Key. + DataEncryptionState *DataEncryptionState `json:"dataEncryptionState,omitempty"` // InvocationConfig: Immutable. If left unset, a default InvocationConfig will // be used. InvocationConfig *InvocationConfig `json:"invocationConfig,omitempty"` @@ -2795,20 +2857,23 @@ func (s *WorkflowInvocationAction) MarshalJSON() ([]byte, error) { // Workspace: Represents a Dataform Git workspace. type Workspace struct { + // DataEncryptionState: Output only. A data encryption state of a Git + // repository if this Workspace is protected by a KMS key. + DataEncryptionState *DataEncryptionState `json:"dataEncryptionState,omitempty"` // Name: Identifier. The workspace's name. Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Name") to unconditionally - // include in API requests. By default, fields with empty or default values are - // omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "DataEncryptionState") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API requests - // with the JSON null value. By default, fields with empty values are omitted - // from API requests. See + // NullFields is a list of field names (e.g. "DataEncryptionState") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } @@ -3575,10 +3640,11 @@ func (c *ProjectsLocationsRepositoriesCommitCall) doRequest(alt string) (*http.R // Do executes the "dataform.projects.locations.repositories.commit" call. // Any non-2xx status code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was returned. -func (c *ProjectsLocationsRepositoriesCommitCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// *CommitRepositoryChangesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsRepositoriesCommitCall) Do(opts ...googleapi.CallOption) (*CommitRepositoryChangesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3597,7 +3663,7 @@ func (c *ProjectsLocationsRepositoriesCommitCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &CommitRepositoryChangesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, diff --git a/dialogflow/v3/dialogflow-api.json b/dialogflow/v3/dialogflow-api.json index 4044e504d68..069ea1fd5f7 100644 --- a/dialogflow/v3/dialogflow-api.json +++ b/dialogflow/v3/dialogflow-api.json @@ -4453,7 +4453,7 @@ } } }, - "revision": "20240422", + "revision": "20240430", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -7853,7 +7853,8 @@ "PARAMETER_FILLING", "NO_MATCH", "NO_INPUT", - "EVENT" + "EVENT", + "LLM" ], "enumDescriptions": [ "Not specified. Should never be used.", @@ -7862,7 +7863,8 @@ "The query was used for parameter filling.", "No match was found for the query.", "Indicates an empty query.", - "The query directly triggered an event." + "The query directly triggered an event.", + "The query was handled by an LLM." ], "type": "string" }, diff --git a/dialogflow/v3/dialogflow-gen.go b/dialogflow/v3/dialogflow-gen.go index e57b69a76af..5a802283018 100644 --- a/dialogflow/v3/dialogflow-gen.go +++ b/dialogflow/v3/dialogflow-gen.go @@ -5457,6 +5457,7 @@ type GoogleCloudDialogflowCxV3Match struct { // "NO_MATCH" - No match was found for the query. // "NO_INPUT" - Indicates an empty query. // "EVENT" - The query directly triggered an event. + // "LLM" - The query was handled by an LLM. MatchType string `json:"matchType,omitempty"` // Parameters: The collection of parameters extracted from the query. 
Depending // on your protocol or client library language, this is a map, associative diff --git a/dialogflow/v3beta1/dialogflow-api.json b/dialogflow/v3beta1/dialogflow-api.json index f45809689f8..45cc5953dc4 100644 --- a/dialogflow/v3beta1/dialogflow-api.json +++ b/dialogflow/v3beta1/dialogflow-api.json @@ -4551,7 +4551,7 @@ } } }, - "revision": "20240422", + "revision": "20240430", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -10615,7 +10615,8 @@ "PARAMETER_FILLING", "NO_MATCH", "NO_INPUT", - "EVENT" + "EVENT", + "LLM" ], "enumDescriptions": [ "Not specified. Should never be used.", @@ -10624,7 +10625,8 @@ "The query was used for parameter filling.", "No match was found for the query.", "Indicates an empty query.", - "The query directly triggered an event." + "The query directly triggered an event.", + "The query was handled by an LLM." ], "type": "string" }, diff --git a/dialogflow/v3beta1/dialogflow-gen.go b/dialogflow/v3beta1/dialogflow-gen.go index 3f6cd155dc8..318c540b7c4 100644 --- a/dialogflow/v3beta1/dialogflow-gen.go +++ b/dialogflow/v3beta1/dialogflow-gen.go @@ -9236,6 +9236,7 @@ type GoogleCloudDialogflowCxV3beta1Match struct { // "NO_MATCH" - No match was found for the query. // "NO_INPUT" - Indicates an empty query. // "EVENT" - The query directly triggered an event. + // "LLM" - The query was handled by an LLM. MatchType string `json:"matchType,omitempty"` // Parameters: The collection of parameters extracted from the query. Depending // on your protocol or client library language, this is a map, associative diff --git a/documentai/v1beta3/documentai-api.json b/documentai/v1beta3/documentai-api.json index 6b833caa62b..b48e4c6712f 100644 --- a/documentai/v1beta3/documentai-api.json +++ b/documentai/v1beta3/documentai-api.json @@ -1284,7 +1284,7 @@ } } }, - "revision": "20240417", + "revision": "20240502", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": { @@ -5984,6 +5984,13 @@ "pageSpan": { "$ref": "GoogleCloudDocumentaiV1beta3DocumentChunkedDocumentChunkChunkPageSpan", "description": "Page span of the chunk." + }, + "sourceBlockIds": { + "description": "DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" diff --git a/documentai/v1beta3/documentai-gen.go b/documentai/v1beta3/documentai-gen.go index 6668d186855..fe25dd7a48e 100644 --- a/documentai/v1beta3/documentai-gen.go +++ b/documentai/v1beta3/documentai-gen.go @@ -6399,6 +6399,9 @@ type GoogleCloudDocumentaiV1beta3DocumentChunkedDocumentChunk struct { PageHeaders []*GoogleCloudDocumentaiV1beta3DocumentChunkedDocumentChunkChunkPageHeader `json:"pageHeaders,omitempty"` // PageSpan: Page span of the chunk. PageSpan *GoogleCloudDocumentaiV1beta3DocumentChunkedDocumentChunkChunkPageSpan `json:"pageSpan,omitempty"` + // SourceBlockIds: DO NOT USE. List of all parsed documents layout source + // blocks used to generate the chunk. + SourceBlockIds []string `json:"sourceBlockIds,omitempty"` // ForceSendFields is a list of field names (e.g. "ChunkId") to unconditionally // include in API requests. By default, fields with empty or default values are // omitted from API requests. 
See diff --git a/monitoring/v1/monitoring-api.json b/monitoring/v1/monitoring-api.json index 951945bc515..d549adc6052 100644 --- a/monitoring/v1/monitoring-api.json +++ b/monitoring/v1/monitoring-api.json @@ -753,7 +753,7 @@ } } }, - "revision": "20240121", + "revision": "20240427", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -1086,6 +1086,10 @@ "description": "A filter to reduce the amount of data charted in relevant widgets.", "id": "DashboardFilter", "properties": { + "applyToNewWidgets": { + "description": "Whether to apply this filter to new widgets by default", + "type": "boolean" + }, "filterType": { "description": "The specified filter type", "enum": [ diff --git a/monitoring/v1/monitoring-gen.go b/monitoring/v1/monitoring-gen.go index d69f86587d0..ed1a93f06e2 100644 --- a/monitoring/v1/monitoring-gen.go +++ b/monitoring/v1/monitoring-gen.go @@ -879,6 +879,8 @@ func (s *Dashboard) MarshalJSON() ([]byte, error) { // DashboardFilter: A filter to reduce the amount of data charted in relevant // widgets. type DashboardFilter struct { + // ApplyToNewWidgets: Whether to apply this filter to new widgets by default + ApplyToNewWidgets bool `json:"applyToNewWidgets,omitempty"` // FilterType: The specified filter type // // Possible values: @@ -898,15 +900,15 @@ type DashboardFilter struct { // string or MQL query. If omitted, the dashboard filter will be applied to all // relevant widgets in the dashboard. TemplateVariable string `json:"templateVariable,omitempty"` - // ForceSendFields is a list of field names (e.g. "FilterType") to + // ForceSendFields is a list of field names (e.g. "ApplyToNewWidgets") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "FilterType") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "ApplyToNewWidgets") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } diff --git a/monitoring/v3/monitoring-api.json b/monitoring/v3/monitoring-api.json index b030ebabc2e..d8d47fb3d1b 100644 --- a/monitoring/v3/monitoring-api.json +++ b/monitoring/v3/monitoring-api.json @@ -2714,7 +2714,7 @@ } } }, - "revision": "20240414", + "revision": "20240427", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -5389,7 +5389,7 @@ "type": "object" }, "ServiceAgentAuthentication": { - "description": "Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account.", + "description": "Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). 
The token will be generated for the Monitoring service agent service account.", "id": "ServiceAgentAuthentication", "properties": { "type": { diff --git a/monitoring/v3/monitoring-gen.go b/monitoring/v3/monitoring-gen.go index 675aa2c09ff..6581dbbf4e9 100644 --- a/monitoring/v3/monitoring-gen.go +++ b/monitoring/v3/monitoring-gen.go @@ -4575,10 +4575,11 @@ func (s *MService) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) } -// ServiceAgentAuthentication: Contains information needed for generating an -// OpenID Connect token -// (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC -// token will be generated for the Monitoring service agent service account. +// ServiceAgentAuthentication: Contains information needed for generating +// either an OpenID Connect token +// (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth +// token (https://developers.google.com/identity/protocols/oauth2). The token +// will be generated for the Monitoring service agent service account. type ServiceAgentAuthentication struct { // Type: Type of authentication. // diff --git a/networkmanagement/v1/networkmanagement-api.json b/networkmanagement/v1/networkmanagement-api.json index 740d1e60cb0..147ced24ab7 100644 --- a/networkmanagement/v1/networkmanagement-api.json +++ b/networkmanagement/v1/networkmanagement-api.json @@ -591,7 +591,7 @@ } } }, - "revision": "20240417", + "revision": "20240424", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { @@ -1113,6 +1113,8 @@ "CLOUD_FUNCTION_NOT_ACTIVE", "VPC_CONNECTOR_NOT_SET", "VPC_CONNECTOR_NOT_RUNNING", + "VPC_CONNECTOR_SERVERLESS_TRAFFIC_BLOCKED", + "VPC_CONNECTOR_HEALTH_CHECK_TRAFFIC_BLOCKED", "FORWARDING_RULE_REGION_MISMATCH", "PSC_CONNECTION_NOT_ACCEPTED", "PSC_ENDPOINT_ACCESSED_FROM_PEERED_NETWORK", @@ -1175,6 +1177,8 @@ "Packet could be dropped because the Cloud Function is not in an active status.", "Packet could be dropped because no VPC connector is set.", "Packet could be dropped because the VPC connector is not in a running state.", + "Packet could be dropped because the traffic from the serverless service to the VPC connector is not allowed.", + "Packet could be dropped because the health check traffic to the VPC connector is not allowed.", "Packet could be dropped because it was sent from a different region to a regional forwarding without global access.", "The Private Service Connect endpoint is in a project that is not approved to connect to the service.", "The packet is sent to the Private Service Connect endpoint over the peering, but [it's not supported](https://cloud.google.com/vpc/docs/configure-private-service-connect-services#on-premises).", diff --git a/networkmanagement/v1/networkmanagement-gen.go b/networkmanagement/v1/networkmanagement-gen.go index 486e7c00c94..4db2c20e3f5 100644 --- a/networkmanagement/v1/networkmanagement-gen.go +++ b/networkmanagement/v1/networkmanagement-gen.go @@ -945,6 +945,11 @@ type DropInfo struct { // is set. // "VPC_CONNECTOR_NOT_RUNNING" - Packet could be dropped because the VPC // connector is not in a running state. + // "VPC_CONNECTOR_SERVERLESS_TRAFFIC_BLOCKED" - Packet could be dropped + // because the traffic from the serverless service to the VPC connector is not + // allowed. + // "VPC_CONNECTOR_HEALTH_CHECK_TRAFFIC_BLOCKED" - Packet could be dropped + // because the health check traffic to the VPC connector is not allowed. 
// "FORWARDING_RULE_REGION_MISMATCH" - Packet could be dropped because it was // sent from a different region to a regional forwarding without global access. // "PSC_CONNECTION_NOT_ACCEPTED" - The Private Service Connect endpoint is in diff --git a/networkmanagement/v1beta1/networkmanagement-api.json b/networkmanagement/v1beta1/networkmanagement-api.json index a6805d55072..e445a9bc90e 100644 --- a/networkmanagement/v1beta1/networkmanagement-api.json +++ b/networkmanagement/v1beta1/networkmanagement-api.json @@ -758,7 +758,7 @@ } } }, - "revision": "20240417", + "revision": "20240424", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { @@ -1280,6 +1280,8 @@ "CLOUD_FUNCTION_NOT_ACTIVE", "VPC_CONNECTOR_NOT_SET", "VPC_CONNECTOR_NOT_RUNNING", + "VPC_CONNECTOR_SERVERLESS_TRAFFIC_BLOCKED", + "VPC_CONNECTOR_HEALTH_CHECK_TRAFFIC_BLOCKED", "FORWARDING_RULE_REGION_MISMATCH", "PSC_CONNECTION_NOT_ACCEPTED", "PSC_ENDPOINT_ACCESSED_FROM_PEERED_NETWORK", @@ -1342,6 +1344,8 @@ "Packet could be dropped because the Cloud Function is not in an active status.", "Packet could be dropped because no VPC connector is set.", "Packet could be dropped because the VPC connector is not in a running state.", + "Packet could be dropped because the traffic from the serverless service to the VPC connector is not allowed.", + "Packet could be dropped because the health check traffic to the VPC connector is not allowed.", "Packet could be dropped because it was sent from a different region to a regional forwarding without global access.", "The Private Service Connect endpoint is in a project that is not approved to connect to the service.", "The packet is sent to the Private Service Connect endpoint over the peering, but [it's not supported](https://cloud.google.com/vpc/docs/configure-private-service-connect-services#on-premises).", diff --git a/networkmanagement/v1beta1/networkmanagement-gen.go b/networkmanagement/v1beta1/networkmanagement-gen.go index 3166642deed..34ce7564c2d 100644 --- a/networkmanagement/v1beta1/networkmanagement-gen.go +++ b/networkmanagement/v1beta1/networkmanagement-gen.go @@ -957,6 +957,11 @@ type DropInfo struct { // is set. // "VPC_CONNECTOR_NOT_RUNNING" - Packet could be dropped because the VPC // connector is not in a running state. + // "VPC_CONNECTOR_SERVERLESS_TRAFFIC_BLOCKED" - Packet could be dropped + // because the traffic from the serverless service to the VPC connector is not + // allowed. + // "VPC_CONNECTOR_HEALTH_CHECK_TRAFFIC_BLOCKED" - Packet could be dropped + // because the health check traffic to the VPC connector is not allowed. // "FORWARDING_RULE_REGION_MISMATCH" - Packet could be dropped because it was // sent from a different region to a regional forwarding without global access. // "PSC_CONNECTION_NOT_ACCEPTED" - The Private Service Connect endpoint is in diff --git a/privateca/v1/privateca-api.json b/privateca/v1/privateca-api.json index 4dfe0ad58e6..055a2821f26 100644 --- a/privateca/v1/privateca-api.json +++ b/privateca/v1/privateca-api.json @@ -1605,7 +1605,7 @@ } } }, - "revision": "20240410", + "revision": "20240424", "rootUrl": "https://privateca.googleapis.com/", "schemas": { "AccessUrls": { @@ -1968,6 +1968,16 @@ "readOnly": true, "type": "array" }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, + "satisfiesPzs": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "state": { "description": "Output only. 
The State for this CertificateAuthority.", "enum": [ diff --git a/privateca/v1/privateca-gen.go b/privateca/v1/privateca-gen.go index cc8094712e7..df14893eb7d 100644 --- a/privateca/v1/privateca-gen.go +++ b/privateca/v1/privateca-gen.go @@ -749,6 +749,10 @@ type CertificateAuthority struct { // For a self-signed CA, this will only list the current CertificateAuthority's // certificate. PemCaCertificates []string `json:"pemCaCertificates,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: Output only. Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` // State: Output only. The State for this CertificateAuthority. // // Possible values: diff --git a/serviceconsumermanagement/v1/serviceconsumermanagement-api.json b/serviceconsumermanagement/v1/serviceconsumermanagement-api.json index 669d8ffe93a..81a8d7bbb27 100644 --- a/serviceconsumermanagement/v1/serviceconsumermanagement-api.json +++ b/serviceconsumermanagement/v1/serviceconsumermanagement-api.json @@ -542,7 +542,7 @@ } } }, - "revision": "20240414", + "revision": "20240429", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { @@ -1249,8 +1249,7 @@ "id": "Endpoint", "properties": { "aliases": { - "deprecated": true, - "description": "Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", + "description": "Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints.", "items": { "type": "string" }, diff --git a/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go b/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go index 68ed0d7068c..6d59f7a14bd 100644 --- a/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go +++ b/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go @@ -1272,10 +1272,9 @@ type Empty struct { // to be passed to the API frontend, for it # to decide whether the subsequent // cross-origin request is allowed # to proceed. allow_cors: true type Endpoint struct { - // Aliases: Unimplemented. Dot not use. DEPRECATED: This field is no longer - // supported. Instead of using aliases, please specify multiple - // google.api.Endpoint for each of the intended aliases. Additional names that - // this endpoint will be hosted on. + // Aliases: Aliases for this endpoint, these will be served by the same UrlMap + // as the parent endpoint, and will be provisioned in the GCP stack for the + // Regional Endpoints. 
Aliases []string `json:"aliases,omitempty"` // AllowCors: Allowing CORS // (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka diff --git a/serviceconsumermanagement/v1beta1/serviceconsumermanagement-api.json b/serviceconsumermanagement/v1beta1/serviceconsumermanagement-api.json index 13db8171d58..c53e83f60fc 100644 --- a/serviceconsumermanagement/v1beta1/serviceconsumermanagement-api.json +++ b/serviceconsumermanagement/v1beta1/serviceconsumermanagement-api.json @@ -500,7 +500,7 @@ } } }, - "revision": "20240414", + "revision": "20240429", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "Api": { @@ -1119,8 +1119,7 @@ "id": "Endpoint", "properties": { "aliases": { - "deprecated": true, - "description": "Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", + "description": "Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints.", "items": { "type": "string" }, diff --git a/serviceconsumermanagement/v1beta1/serviceconsumermanagement-gen.go b/serviceconsumermanagement/v1beta1/serviceconsumermanagement-gen.go index a5a58279695..85e20e0962f 100644 --- a/serviceconsumermanagement/v1beta1/serviceconsumermanagement-gen.go +++ b/serviceconsumermanagement/v1beta1/serviceconsumermanagement-gen.go @@ -1129,10 +1129,9 @@ type Empty struct { // to be passed to the API frontend, for it # to decide whether the subsequent // cross-origin request is allowed # to proceed. allow_cors: true type Endpoint struct { - // Aliases: Unimplemented. Dot not use. DEPRECATED: This field is no longer - // supported. Instead of using aliases, please specify multiple - // google.api.Endpoint for each of the intended aliases. Additional names that - // this endpoint will be hosted on. + // Aliases: Aliases for this endpoint, these will be served by the same UrlMap + // as the parent endpoint, and will be provisioned in the GCP stack for the + // Regional Endpoints. Aliases []string `json:"aliases,omitempty"` // AllowCors: Allowing CORS // (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka diff --git a/spanner/v1/spanner-api.json b/spanner/v1/spanner-api.json index 30b86093e9a..bc4bf2f68b0 100644 --- a/spanner/v1/spanner-api.json +++ b/spanner/v1/spanner-api.json @@ -978,6 +978,12 @@ "location": "query", "type": "string" }, + "encryptionConfig.kmsKeyNames": { + "description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", + "location": "query", + "repeated": true, + "type": "string" + }, "parent": { "description": "Required. 
The name of the instance in which the backup will be created. This must be the same instance that contains the database the backup will be created from. The backup will be stored in the location(s) specified in the instance configuration of this instance. Values are of the form `projects//instances/`.", "location": "path", @@ -2970,7 +2976,7 @@ } } }, - "revision": "20240422", + "revision": "20240423", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "AutoscalingConfig": { @@ -3066,6 +3072,14 @@ "description": "Output only. The encryption information for the backup.", "readOnly": true }, + "encryptionInformation": { + "description": "Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined.", + "items": { + "$ref": "EncryptionInfo" + }, + "readOnly": true, + "type": "array" + }, "expireTime": { "description": "Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup.", "format": "google-datetime", @@ -3409,6 +3423,13 @@ "kmsKeyName": { "description": "Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`.", "type": "string" + }, + "kmsKeyNames": { + "description": "Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. Kms keys specified can be in any order. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -3907,6 +3928,13 @@ "kmsKeyName": { "description": "The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`.", "type": "string" + }, + "kmsKeyNames": { + "description": "Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. 
* For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -5725,6 +5753,13 @@ "kmsKeyName": { "description": "Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored database. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`.", "type": "string" + }, + "kmsKeyNames": { + "description": "Optional. Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -6133,7 +6168,7 @@ "type": "object" }, "TransactionOptions": { - "description": "Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. 
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. 
Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps \u003c= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. 
As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. 
- The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table.", + "description": "Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. 
Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. 
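(Editorial sketch, not generated code.) The retry guidance above, namely retrying in the same session so lock priority accumulates and capping the total time spent retrying rather than the number of attempts, can be written as a small loop. In the sketch below, runInSameSession and isAborted are hypothetical placeholders for one attempt of the whole read-write transaction and for ABORTED detection; neither is part of this package.

package example

import (
	"context"
	"errors"
	"time"
)

// errAborted stands in for the ABORTED error described above; a real caller
// would detect it from the RPC status instead.
var errAborted = errors.New("spanner: transaction aborted")

func isAborted(err error) bool { return errors.Is(err, errAborted) }

// runInSameSession is a hypothetical placeholder for one attempt of the whole
// read-write transaction, executed against the same session each time so the
// accumulated lock priority is preserved.
func runInSameSession(ctx context.Context) error {
	// ... reads, writes, then Commit ...
	return nil
}

// commitWithRetry bounds the total time spent retrying rather than the number
// of attempts, as the description recommends.
func commitWithRetry(ctx context.Context, maxElapsed time.Duration) error {
	deadline := time.Now().Add(maxElapsed)
	backoff := 20 * time.Millisecond
	for {
		err := runInSameSession(ctx)
		if err == nil || !isAborted(err) {
			return err // success, or an error that is not ABORTED
		}
		if time.Now().After(deadline) {
			return err // retry budget exhausted; surface the last ABORTED error
		}
		time.Sleep(backoff)
		backoff *= 2 // simple exponential backoff between attempts
	}
}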
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps \u003c= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
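(Editorial sketch, not generated code.) The three timestamp bounds described here map onto the generated TransactionOptions.ReadOnly type. The sketch below assumes the ReadOnly fields Strong, ReturnReadTimestamp, ExactStaleness, and MaxStaleness mirror this discovery revision's schema, with durations encoded as strings such as "15s".

package example

import spanner "google.golang.org/api/spanner/v1"

// strongBound: the default; sees everything committed before the read starts.
func strongBound() *spanner.TransactionOptions {
	return &spanner.TransactionOptions{
		ReadOnly: &spanner.ReadOnly{Strong: true, ReturnReadTimestamp: true},
	}
}

// exactStaleness: read exactly 15 seconds in the past; no negotiation phase.
func exactStaleness() *spanner.TransactionOptions {
	return &spanner.TransactionOptions{
		ReadOnly: &spanner.ReadOnly{ExactStaleness: "15s"},
	}
}

// boundedStaleness: let Cloud Spanner pick the freshest timestamp within 10s,
// usually served by the closest replica; single-use transactions only.
func boundedStaleness() *spanner.TransactionOptions {
	return &spanner.TransactionOptions{
		ReadOnly: &spanner.ReadOnly{MaxStaleness: "10s"},
	}
}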
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. 
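(Editorial sketch, not generated code.) For the change stream rules above, queries must go through ExecuteStreamingSql with a single-use, strong, read-only transaction. The request shape below uses the generated ExecuteSqlRequest and TransactionSelector types; the stream name READ_my_stream and the TVF argument list are illustrative assumptions, not taken from this discovery document.

package example

import spanner "google.golang.org/api/spanner/v1"

// changeStreamRequest builds a request for the Sessions.ExecuteStreamingSql
// method. start and end are RFC 3339 timestamps inside the retention period.
func changeStreamRequest(start, end string) *spanner.ExecuteSqlRequest {
	return &spanner.ExecuteSqlRequest{
		// The only TransactionOptions accepted for change stream queries:
		// single-use, read-only, with a strong timestamp bound.
		Transaction: &spanner.TransactionSelector{
			SingleUse: &spanner.TransactionOptions{
				ReadOnly: &spanner.ReadOnly{Strong: true},
			},
		},
		// Query the TVF that Spanner generates for the change stream.
		// Argument order and the trailing partition_token/heartbeat values
		// are illustrative; string concatenation is for brevity only.
		Sql: "SELECT ChangeRecord FROM READ_my_stream('" + start + "', '" + end + "', NULL, 10000)",
	}
}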
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table.", "id": "TransactionOptions", "properties": { "excludeTxnFromChangeStreams": { diff --git a/spanner/v1/spanner-gen.go b/spanner/v1/spanner-gen.go index 423749b27f4..c5bf226e01d 100644 --- a/spanner/v1/spanner-gen.go +++ b/spanner/v1/spanner-gen.go @@ -521,6 +521,14 @@ type Backup struct { DatabaseDialect string `json:"databaseDialect,omitempty"` // EncryptionInfo: Output only. The encryption information for the backup. EncryptionInfo *EncryptionInfo `json:"encryptionInfo,omitempty"` + // EncryptionInformation: Output only. The encryption information for the + // backup, whether it is protected by one or more KMS keys. The information + // includes all Cloud KMS key versions used to encrypt the backup. The + // `encryption_status' field inside of each `EncryptionInfo` is not populated. + // At least one of the key versions must be available for the backup to be + // restored. If a key version is revoked in the middle of a restore, the + // restore behavior is undefined. + EncryptionInformation []*EncryptionInfo `json:"encryptionInformation,omitempty"` // ExpireTime: Required for the CreateBackup operation. 
The expiration time of // the backup, with microseconds granularity that must be at least 6 hours and // at most 366 days from the time the CreateBackup request is processed. Once @@ -1078,6 +1086,19 @@ type CopyBackupEncryptionConfig struct { // `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. KmsKeyName string `json:"kmsKeyName,omitempty"` + // KmsKeyNames: Optional. Specifies the KMS configuration for the one or more + // keys used to protect the backup. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. Kms keys specified can be in + // any order. The keys referenced by kms_key_names must fully cover all regions + // of the backup's instance configuration. Some examples: * For single region + // instance configs, specify a single regional location KMS key. * For + // multi-regional instance configs of type GOOGLE_MANAGED, either specify a + // multi-regional location KMS key or multiple regional location KMS keys that + // cover all regions in the instance config. * For an instance config of type + // USER_MANAGED, please specify only regional location KMS keys to cover each + // region in the instance config. Multi-regional location KMS keys are not + // supported for USER_MANAGED instance configs. + KmsKeyNames []string `json:"kmsKeyNames,omitempty"` // ForceSendFields is a list of field names (e.g. "EncryptionType") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -1797,6 +1818,19 @@ type EncryptionConfig struct { // database. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. KmsKeyName string `json:"kmsKeyName,omitempty"` + // KmsKeyNames: Specifies the KMS configuration for the one or more keys used + // to encrypt the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. The keys referenced by + // kms_key_names must fully cover all regions of the database instance + // configuration. Some examples: * For single region database instance configs, + // specify a single regional location KMS key. * For multi-regional database + // instance configs of type GOOGLE_MANAGED, either specify a multi-regional + // location KMS key or multiple regional location KMS keys that cover all + // regions in the instance config. * For a database instance config of type + // USER_MANAGED, please specify only regional location KMS keys to cover each + // region in the instance config. Multi-regional location KMS keys are not + // supported for USER_MANAGED instance configs. + KmsKeyNames []string `json:"kmsKeyNames,omitempty"` // ForceSendFields is a list of field names (e.g. "KmsKeyName") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -4500,6 +4534,19 @@ type RestoreDatabaseEncryptionConfig struct { // `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. KmsKeyName string `json:"kmsKeyName,omitempty"` + // KmsKeyNames: Optional. Specifies the KMS configuration for the one or more + // keys used to encrypt the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. The keys referenced by + // kms_key_names must fully cover all regions of the database instance + // configuration. Some examples: * For single region database instance configs, + // specify a single regional location KMS key. 
* For multi-regional database + // instance configs of type GOOGLE_MANAGED, either specify a multi-regional + // location KMS key or multiple regional location KMS keys that cover all + // regions in the instance config. * For a database instance config of type + // USER_MANAGED, please specify only regional location KMS keys to cover each + // region in the instance config. Multi-regional location KMS keys are not + // supported for USER_MANAGED instance configs. + KmsKeyNames []string `json:"kmsKeyNames,omitempty"` // ForceSendFields is a list of field names (e.g. "EncryptionType") to // unconditionally include in API requests. By default, fields with empty or // default values are omitted from API requests. See @@ -5166,59 +5213,64 @@ func (s *Transaction) MarshalJSON() ([]byte, error) { // successfully committing the retry, the client should execute the retry in // the same session as the original attempt. The original session's lock // priority increases with each consecutive abort, meaning that each attempt -// has a slightly better chance of success than the previous. Under some -// circumstances (for example, many transactions attempting to modify the same -// row(s)), a transaction can abort many times in a short period before -// successfully committing. Thus, it is not a good idea to cap the number of -// retries a transaction can attempt; instead, it is better to limit the total -// amount of time spent retrying. Idle transactions: A transaction is -// considered idle if it has no outstanding reads or SQL queries and has not -// started a read or SQL query within the last 10 seconds. Idle transactions -// can be aborted by Cloud Spanner so that they don't hold on to locks -// indefinitely. If an idle transaction is aborted, the commit will fail with -// error `ABORTED`. If this behavior is undesirable, periodically executing a -// simple SQL query in the transaction (for example, `SELECT 1`) prevents the -// transaction from becoming idle. Snapshot read-only transactions: Snapshot -// read-only transactions provides a simpler method than locking read-write -// transactions for doing several consistent reads. However, this type of -// transaction does not support writes. Snapshot transactions do not take -// locks. Instead, they work by choosing a Cloud Spanner timestamp, then -// executing all reads at that timestamp. Since they do not acquire locks, they -// do not block concurrent read-write transactions. Unlike locking read-write -// transactions, snapshot read-only transactions never abort. They can fail if -// the chosen read timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not need to -// worry about this in practice. Snapshot read-only transactions do not need to -// call Commit or Rollback (and in fact are not permitted to do so). To execute -// a snapshot transaction, the client specifies a timestamp bound, which tells -// Cloud Spanner how to choose a read timestamp. The types of timestamp bound -// are: - Strong (the default). - Bounded staleness. - Exact staleness. If the -// Cloud Spanner database to be read is geographically distributed, stale -// read-only transactions can execute more quickly than strong or read-write -// transactions, because they are able to execute far from the leader replica. -// Each type of timestamp bound is discussed in detail below. 
Strong: Strong -// reads are guaranteed to see the effects of all transactions that have -// committed before the start of the read. Furthermore, all rows yielded by a -// single read are consistent with each other -- if any part of the read -// observes a transaction, all parts of the read see the transaction. Strong -// reads are not repeatable: two consecutive strong read-only transactions -// might return inconsistent results if there are concurrent writes. If -// consistency across reads is required, the reads should be executed within a -// transaction or at an exact read timestamp. Queries on change streams (see -// below for more details) must also specify the strong read timestamp bound. -// See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp -// bounds execute reads at a user-specified timestamp. Reads at a timestamp are -// guaranteed to see a consistent prefix of the global transaction history: -// they observe modifications done by all transactions with a commit timestamp -// less than or equal to the read timestamp, and observe none of the -// modifications done by transactions with a larger commit timestamp. They will -// block until all conflicting transactions that may be assigned commit -// timestamps <= the read timestamp have finished. The timestamp can either be -// expressed as an absolute Cloud Spanner commit timestamp or a staleness -// relative to the current time. These modes do not require a "negotiation -// phase" to pick a timestamp. As a result, they execute slightly faster than -// the equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. See +// has a slightly better chance of success than the previous. Note that the +// lock priority is preserved per session (not per transaction). Lock priority +// is set by the first read or write in the first attempt of a read-write +// transaction. If the application starts a new session to retry the whole +// transaction, the transaction loses its original lock priority. Moreover, the +// lock priority is only preserved if the transaction fails with an `ABORTED` +// error. Under some circumstances (for example, many transactions attempting +// to modify the same row(s)), a transaction can abort many times in a short +// period before successfully committing. Thus, it is not a good idea to cap +// the number of retries a transaction can attempt; instead, it is better to +// limit the total amount of time spent retrying. Idle transactions: A +// transaction is considered idle if it has no outstanding reads or SQL queries +// and has not started a read or SQL query within the last 10 seconds. Idle +// transactions can be aborted by Cloud Spanner so that they don't hold on to +// locks indefinitely. If an idle transaction is aborted, the commit will fail +// with error `ABORTED`. If this behavior is undesirable, periodically +// executing a simple SQL query in the transaction (for example, `SELECT 1`) +// prevents the transaction from becoming idle. Snapshot read-only +// transactions: Snapshot read-only transactions provides a simpler method than +// locking read-write transactions for doing several consistent reads. However, +// this type of transaction does not support writes. Snapshot transactions do +// not take locks. Instead, they work by choosing a Cloud Spanner timestamp, +// then executing all reads at that timestamp. Since they do not acquire locks, +// they do not block concurrent read-write transactions. 
Unlike locking +// read-write transactions, snapshot read-only transactions never abort. They +// can fail if the chosen read timestamp is garbage collected; however, the +// default garbage collection policy is generous enough that most applications +// do not need to worry about this in practice. Snapshot read-only transactions +// do not need to call Commit or Rollback (and in fact are not permitted to do +// so). To execute a snapshot transaction, the client specifies a timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. The types +// of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact +// staleness. If the Cloud Spanner database to be read is geographically +// distributed, stale read-only transactions can execute more quickly than +// strong or read-write transactions, because they are able to execute far from +// the leader replica. Each type of timestamp bound is discussed in detail +// below. Strong: Strong reads are guaranteed to see the effects of all +// transactions that have committed before the start of the read. Furthermore, +// all rows yielded by a single read are consistent with each other -- if any +// part of the read observes a transaction, all parts of the read see the +// transaction. Strong reads are not repeatable: two consecutive strong +// read-only transactions might return inconsistent results if there are +// concurrent writes. If consistency across reads is required, the reads should +// be executed within a transaction or at an exact read timestamp. Queries on +// change streams (see below for more details) must also specify the strong +// read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact +// staleness: These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of +// the global transaction history: they observe modifications done by all +// transactions with a commit timestamp less than or equal to the read +// timestamp, and observe none of the modifications done by transactions with a +// larger commit timestamp. They will block until all conflicting transactions +// that may be assigned commit timestamps <= the read timestamp have finished. +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. These modes do not +// require a "negotiation phase" to pick a timestamp. As a result, they execute +// slightly faster than the equivalent boundedly stale concurrency modes. On +// the other hand, boundedly stale reads usually return fresher results. See // TransactionOptions.ReadOnly.read_timestamp and // TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded // staleness modes allow Cloud Spanner to pick the read timestamp, subject to a @@ -5287,9 +5339,9 @@ func (s *Transaction) MarshalJSON() ([]byte, error) { // atomically to partitions of the table, in independent transactions. // Secondary index rows are updated atomically with the base table rows. - // Partitioned DML does not guarantee exactly-once execution semantics against -// a partition. The statement will be applied at least once to each partition. -// It is strongly recommended that the DML statement should be idempotent to -// avoid unexpected results. For instance, it is potentially dangerous to run a +// a partition. The statement is applied at least once to each partition. 
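(Editorial sketch, not generated code.) The at-least-once behaviour just stated is why the idempotency recommendation follows. The sketch below marks a transaction as Partitioned DML with the generated types (PartitionedDml is assumed to be the empty options message in this package) and contrasts an idempotent statement with the increment pattern the comment warns about; table and column names are illustrative.

package example

import spanner "google.golang.org/api/spanner/v1"

// partitionedDMLOptions marks a transaction as Partitioned DML; it would be
// passed to BeginTransaction, and the resulting transaction used by exactly
// one ExecuteSql or ExecuteStreamingSql call.
func partitionedDMLOptions() *spanner.TransactionOptions {
	return &spanner.TransactionOptions{PartitionedDml: &spanner.PartitionedDml{}}
}

// Idempotent: re-running this against a partition leaves the same end state,
// so at-least-once execution is harmless.
const idempotentDML = "DELETE FROM events WHERE create_time < '2023-01-01'"

// Not idempotent: a partition that executes this twice increments twice,
// which is the hazard described above.
const nonIdempotentDML = "UPDATE events SET retries = retries + 1 WHERE status = 'FAILED'"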
It is +// strongly recommended that the DML statement should be idempotent to avoid +// unexpected results. For instance, it is potentially dangerous to run a // statement such as `UPDATE table SET column = column + 1` as it could be run // multiple times against some rows. - The partitions are committed // automatically - there is no support for Commit or Rollback. If the call @@ -9079,6 +9131,24 @@ func (c *ProjectsInstancesBackupsCreateCall) EncryptionConfigKmsKeyName(encrypti return c } +// EncryptionConfigKmsKeyNames sets the optional parameter +// "encryptionConfig.kmsKeyNames": Specifies the KMS configuration for the one +// or more keys used to protect the backup. Values are of the form +// `projects//locations//keyRings//cryptoKeys/`. The keys referenced by +// kms_key_names must fully cover all regions of the backup's instance +// configuration. Some examples: * For single region instance configs, specify +// a single regional location KMS key. * For multi-regional instance configs of +// type GOOGLE_MANAGED, either specify a multi-regional location KMS key or +// multiple regional location KMS keys that cover all regions in the instance +// config. * For an instance config of type USER_MANAGED, please specify only +// regional location KMS keys to cover each region in the instance config. +// Multi-regional location KMS keys are not supported for USER_MANAGED instance +// configs. +func (c *ProjectsInstancesBackupsCreateCall) EncryptionConfigKmsKeyNames(encryptionConfigKmsKeyNames ...string) *ProjectsInstancesBackupsCreateCall { + c.urlParams_.SetMulti("encryptionConfig.kmsKeyNames", append([]string{}, encryptionConfigKmsKeyNames...)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more // details.
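(Editorial sketch, not generated code.) The new EncryptionConfigKmsKeyNames option above slots into the backup creation call chain. In the sketch below, the project, instance, database, and KMS key names are placeholders, and the surrounding calls (NewService, Backups.Create, BackupId, EncryptionConfigEncryptionType, Context, Do) are assumed to match the rest of this generated client; only EncryptionConfigKmsKeyNames itself is taken from the diff.

package example

import (
	"context"

	spanner "google.golang.org/api/spanner/v1"
)

func createBackupWithCMEK(ctx context.Context) (*spanner.Operation, error) {
	svc, err := spanner.NewService(ctx)
	if err != nil {
		return nil, err
	}
	backup := &spanner.Backup{
		Database:   "projects/my-project/instances/my-instance/databases/my-db",
		ExpireTime: "2025-01-01T00:00:00Z",
	}
	// The keys must together cover every region of the backup's instance
	// configuration, per the field documentation above.
	return svc.Projects.Instances.Backups.
		Create("projects/my-project/instances/my-instance", backup).
		BackupId("my-backup").
		EncryptionConfigEncryptionType("CUSTOMER_MANAGED_ENCRYPTION").
		EncryptionConfigKmsKeyNames(
			"projects/my-project/locations/us-east1/keyRings/my-kr/cryptoKeys/key-east",
			"projects/my-project/locations/us-west1/keyRings/my-kr/cryptoKeys/key-west",
		).
		Context(ctx).
		Do()
}

The returned Operation is long-running; a caller would poll it until done before relying on the backup.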