From 78d56c7481b3e5d5e7c6320d6b7f3b9dd703f691 Mon Sep 17 00:00:00 2001
From: Naohiro Yoshida
Date: Fri, 26 Jul 2024 17:42:00 +0900
Subject: [PATCH] apply fix doc_lazy_continuation (#289)

---
 bigquery/src/http/job/mod.rs | 2 +-
 bigquery/src/http/model/mod.rs | 6 +++---
 bigquery/src/http/table/mod.rs | 8 ++++----
 googleapis/src/google.cloud.bigquery.storage.v1.rs | 12 ++++++------
 googleapis/src/google.cloud.kms.v1.rs | 8 ++++----
 .../src/google.devtools.artifactregistry.v1.rs | 6 +++---
 googleapis/src/google.pubsub.v1.rs | 6 +++---
 googleapis/src/google.r#type.rs | 2 +-
 googleapis/src/google.spanner.admin.database.v1.rs | 12 ++++++------
 googleapis/src/google.spanner.admin.instance.v1.rs | 10 +++++-----
 pubsub/src/subscription.rs | 4 ++--
 spanner/src/session.rs | 2 +-
 spanner/src/statement.rs | 2 +-
 storage/src/http/bucket_access_controls/mod.rs | 6 +++---
 storage/src/http/object_access_controls/list.rs | 2 +-
 15 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/bigquery/src/http/job/mod.rs b/bigquery/src/http/job/mod.rs
index 90b62337..6f9d133c 100644
--- a/bigquery/src/http/job/mod.rs
+++ b/bigquery/src/http/job/mod.rs
@@ -201,7 +201,7 @@ pub struct JobConfigurationLoad {
     /// - (38,10) BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
     /// - (76,38) BIGNUMERIC;
     /// - (77,38) BIGNUMERIC (error if value exeeds supported range).
-    /// This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC.
+    ///   This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC.
     ///
     /// Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats.
     pub decimal_target_types: Option>,
diff --git a/bigquery/src/http/model/mod.rs b/bigquery/src/http/model/mod.rs
index c4377247..8b003ee2 100644
--- a/bigquery/src/http/model/mod.rs
+++ b/bigquery/src/http/model/mod.rs
@@ -737,10 +737,10 @@ pub struct TrainingOptions {
     pub data_split_eval_fraction: Option,
     /// The column to split data with. This column won't be used as a feature.
     /// 1. When dataSplitMethod is CUSTOM, the corresponding column should be boolean.
-    /// The rows with true value tag are eval data, and the false are training data.
+    ///    The rows with true value tag are eval data, and the false are training data.
     /// 2. When dataSplitMethod is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data,
-    /// and the rest are eval data.
-    /// It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
+    ///    and the rest are eval data.
+    ///    It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
     pub data_split_column: Option,
     /// The strategy to determine learn rate for the current iteration.
     pub learn_rate_strategy: Option,
diff --git a/bigquery/src/http/table/mod.rs b/bigquery/src/http/table/mod.rs
index 229a21d6..b481fa45 100644
--- a/bigquery/src/http/table/mod.rs
+++ b/bigquery/src/http/table/mod.rs
@@ -162,12 +162,12 @@ pub struct GoogleSheetsOptions {
     /// The default value is 0. This property is useful if you have header rows that should be skipped.
     /// When autodetect is on, the behavior is the following:
     /// * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row.
-    /// If they are not detected, the row is read as data.
-    /// Otherwise data is read starting from the second row.
+    ///   If they are not detected, the row is read as data.
+    ///   Otherwise data is read starting from the second row.
     /// * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row.
     /// * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N.
-    /// If headers are not detected, row N is just skipped.
-    /// Otherwise row N is used to extract column names for the detected schema.
+    ///   If headers are not detected, row N is just skipped.
+    ///   Otherwise row N is used to extract column names for the detected schema.
     #[serde(deserialize_with = "crate::http::from_str_option")]
     #[serde(default)]
     pub skip_leading_rows: Option,
diff --git a/googleapis/src/google.cloud.bigquery.storage.v1.rs b/googleapis/src/google.cloud.bigquery.storage.v1.rs
index 55181e7e..9601314a 100644
--- a/googleapis/src/google.cloud.bigquery.storage.v1.rs
+++ b/googleapis/src/google.cloud.bigquery.storage.v1.rs
@@ -921,8 +921,8 @@ pub struct AppendRowsRequest {
     /// * In the first request to an AppendRows connection.
     ///
     /// * In all subsequent requests to an AppendRows connection, if you use the
-    /// same connection to write to multiple tables or change the input schema for
-    /// default streams.
+    ///   same connection to write to multiple tables or change the input schema for
+    ///   default streams.
     ///
     /// For explicitly created write streams, the format is:
     ///
@@ -1657,14 +1657,14 @@ pub mod big_query_write_client {
         /// table are governed by the type of stream:
         ///
         /// * For COMMITTED streams (which includes the default stream), data is
-        /// visible immediately upon successful append.
+        ///   visible immediately upon successful append.
         ///
         /// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
-        /// rpc which advances a cursor to a newer offset in the stream.
+        ///   rpc which advances a cursor to a newer offset in the stream.
         ///
         /// * For PENDING streams, data is not made visible until the stream itself is
-        /// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
-        /// committed via the `BatchCommitWriteStreams` rpc.
+        ///   finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+        ///   committed via the `BatchCommitWriteStreams` rpc.
         pub async fn append_rows(
             &mut self,
             request: impl tonic::IntoStreamingRequest,
diff --git a/googleapis/src/google.cloud.kms.v1.rs b/googleapis/src/google.cloud.kms.v1.rs
index d1082962..42f1ea63 100644
--- a/googleapis/src/google.cloud.kms.v1.rs
+++ b/googleapis/src/google.cloud.kms.v1.rs
@@ -3615,8 +3615,8 @@ pub mod ekm_connection {
         /// [EkmConnection][google.cloud.kms.v1.EkmConnection] must be initiated from
         /// the EKM directly and cannot be performed from Cloud KMS. This means that:
         /// * When creating a
-        /// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with
-        /// this
+        ///   [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with
+        ///   this
         /// [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must
         /// supply the key path of pre-existing external key material that will be
         /// linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
@@ -3628,8 +3628,8 @@ pub mod ekm_connection {
         /// [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key
         /// management operations initiated from Cloud KMS. This means that:
         /// * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
-        /// associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection]
-        /// is
+        ///   associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection]
+        ///   is
         /// created, the EKM automatically generates new key material and a new
         /// key path. The caller cannot supply the key path of pre-existing
         /// external key material.
diff --git a/googleapis/src/google.devtools.artifactregistry.v1.rs b/googleapis/src/google.devtools.artifactregistry.v1.rs
index 215f2c81..e2255ad2 100644
--- a/googleapis/src/google.devtools.artifactregistry.v1.rs
+++ b/googleapis/src/google.devtools.artifactregistry.v1.rs
@@ -719,9 +719,9 @@ pub struct ListFilesRequest {
     /// An example of using a filter:
     ///
     /// * `name="projects/p1/locations/us-central1/repositories/repo1/files/a/b/*"` --> Files with an
-    /// ID starting with "a/b/".
+    ///   ID starting with "a/b/".
     /// * `owner="projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/1.0"` -->
-    /// Files owned by the version `1.0` in package `pkg1`.
+    ///   Files owned by the version `1.0` in package `pkg1`.
     #[prost(string, tag = "4")]
     pub filter: ::prost::alloc::string::String,
     /// The maximum number of files to return.
@@ -1145,7 +1145,7 @@ pub struct ListTagsRequest {
     /// An example of using a filter:
     ///
     /// * `version="projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/1.0"`
-    /// --> Tags that are applied to the version `1.0` in package `pkg1`.
+    ///   --> Tags that are applied to the version `1.0` in package `pkg1`.
     #[prost(string, tag = "4")]
     pub filter: ::prost::alloc::string::String,
     /// The maximum number of tags to return. Maximum page size is 10,000.
diff --git a/googleapis/src/google.pubsub.v1.rs b/googleapis/src/google.pubsub.v1.rs
index 00ea8019..6b11e4b1 100644
--- a/googleapis/src/google.pubsub.v1.rs
+++ b/googleapis/src/google.pubsub.v1.rs
@@ -971,7 +971,7 @@ pub struct Subscription {
     /// a message with a given value of `message_id` on this subscription:
     ///
     /// * The message sent to a subscriber is guaranteed not to be resent
-    /// before the message's acknowledgement deadline expires.
+    ///   before the message's acknowledgement deadline expires.
     /// * An acknowledged message will not be resent to a subscriber.
     ///
     /// Note that subscribers may still receive multiple copies of a message
@@ -1258,9 +1258,9 @@ pub mod big_query_config {
         /// Cannot write to the BigQuery table because of permission denied errors.
         /// This can happen if
         /// - Pub/Sub SA has not been granted the [appropriate BigQuery IAM
-        /// permissions]()
+        ///   permissions]()
         /// - bigquery.googleapis.com API is not enabled for the project
-        /// ([instructions]())
+        ///   ([instructions]())
         PermissionDenied = 2,
         /// Cannot write to the BigQuery table because it does not exist.
         NotFound = 3,
diff --git a/googleapis/src/google.r#type.rs b/googleapis/src/google.r#type.rs
index 9eb064a3..57d13ed7 100644
--- a/googleapis/src/google.r#type.rs
+++ b/googleapis/src/google.r#type.rs
@@ -60,7 +60,7 @@ pub struct Expr {
 /// * A month and day value, with a zero year, such as an anniversary
 /// * A year on its own, with zero month and day values
 /// * A year and month value, with a zero day, such as a credit card expiration
-/// date
+///   date
 ///
 /// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and
 /// `google.protobuf.Timestamp`.
diff --git a/googleapis/src/google.spanner.admin.database.v1.rs b/googleapis/src/google.spanner.admin.database.v1.rs
index 9cb02a0e..a68fe2cc 100644
--- a/googleapis/src/google.spanner.admin.database.v1.rs
+++ b/googleapis/src/google.spanner.admin.database.v1.rs
@@ -516,7 +516,7 @@ pub struct ListBackupOperationsRequest {
     /// `metadata.database:prod` - Returns operations where:
     /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
     /// * The database the backup was taken from has a name containing the
-    /// string "prod".
+    ///   string "prod".
     /// * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
     /// `(metadata.name:howl) AND` \
     /// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
@@ -531,7 +531,7 @@ pub struct ListBackupOperationsRequest {
     /// `(error:*)` - Returns operations where:
     /// * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
     /// * The source backup of the copied backup name contains the string
-    /// "test".
+    ///   "test".
     /// * The operation started before 2022-01-18T14:50:00Z.
     /// * The operation resulted in an error.
     /// * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
@@ -542,11 +542,11 @@ pub struct ListBackupOperationsRequest {
     /// `(error:*)` - Returns operations where:
     /// * The operation's metadata matches either of criteria:
     /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the
-    /// database the backup was taken from has name containing string
-    /// "test_db"
+    ///   database the backup was taken from has name containing string
+    ///   "test_db"
     /// * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the
-    /// backup the backup was copied from has name containing string
-    /// "test_bkp"
+    ///   backup the backup was copied from has name containing string
+    ///   "test_bkp"
     /// * The operation resulted in an error.
     #[prost(string, tag = "2")]
     pub filter: ::prost::alloc::string::String,
diff --git a/googleapis/src/google.spanner.admin.instance.v1.rs b/googleapis/src/google.spanner.admin.instance.v1.rs
index b1f06c06..49b8a6d3 100644
--- a/googleapis/src/google.spanner.admin.instance.v1.rs
+++ b/googleapis/src/google.spanner.admin.instance.v1.rs
@@ -527,7 +527,7 @@ pub struct ListInstanceConfigOperationsRequest {
     /// `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
     /// `(error:*)` - Return operations where:
     /// * The operation's metadata type is
-    /// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+    ///   [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
     /// * The instance config name contains "custom-config".
     /// * The operation started before 2021-03-28T14:50:00Z.
     /// * The operation resulted in an error.
@@ -922,8 +922,8 @@ pub mod instance_admin_client {
         ///
         /// * Instances can be created using the instance configuration.
         /// * The instance config's
-        /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
-        /// field becomes false. Its state becomes `READY`.
+        ///   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+        ///   field becomes false. Its state becomes `READY`.
         ///
         /// The returned [long-running operation][google.longrunning.Operation] will
         /// have a name of the format
@@ -988,8 +988,8 @@ pub mod instance_admin_client {
         /// values.
         /// * The instance config's new values are readable via the API.
         /// * The instance config's
-        /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
-        /// field becomes false.
+        ///   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+        ///   field becomes false.
         ///
         /// The returned [long-running operation][google.longrunning.Operation] will
         /// have a name of the format
diff --git a/pubsub/src/subscription.rs b/pubsub/src/subscription.rs
index e256a0d2..f61e0e7a 100644
--- a/pubsub/src/subscription.rs
+++ b/pubsub/src/subscription.rs
@@ -649,8 +649,8 @@ impl Subscription {
     /// - The message backlog on the subscription -- or to be specific, messages that are unacknowledged
     /// at the time of the subscription's creation.
     /// - All messages published to the subscription's topic after the snapshot's creation.
-    /// Snapshots have a finite lifetime -- a maximum of 7 days from the time of creation, beyond which
-    /// they are discarded and any messages being retained solely due to the snapshot dropped.
+    ///   Snapshots have a finite lifetime -- a maximum of 7 days from the time of creation, beyond which
+    ///   they are discarded and any messages being retained solely due to the snapshot dropped.
     pub async fn create_snapshot(
         &self,
         name: &str,
diff --git a/spanner/src/session.rs b/spanner/src/session.rs
index c9b9588b..d5a0542f 100644
--- a/spanner/src/session.rs
+++ b/spanner/src/session.rs
@@ -311,7 +311,7 @@ impl SessionPool {
     /// If the session is valid
     /// - Pass the session to the first user on the waiting list.
     /// - If there is no waiting list, the session is returned to the list of available sessions.
-    /// If the session is invalid
+    ///   If the session is invalid
     /// - Discard the session. If the number of sessions falls below the threshold as a result of discarding, the session replenishment process is called.
     fn recycle(&self, mut session: SessionHandle) {
         if session.valid {
diff --git a/spanner/src/statement.rs b/spanner/src/statement.rs
index 708e0843..1c336495 100644
--- a/spanner/src/statement.rs
+++ b/spanner/src/statement.rs
@@ -315,7 +315,7 @@ mod test {
     // Test that prost's to_kind implementation works as expected.
     #[test]
     fn prost_timestamp_to_kind_works() {
-        let ts = ::prost_types::Timestamp::date_time(2024, 01, 01, 12, 15, 36).unwrap();
+        let ts = ::prost_types::Timestamp::date_time(2024, 1, 1, 12, 15, 36).unwrap();
         let expected = String::from("2024-01-01T12:15:36Z");
         // Make sure the formatting of prost_types::Timestamp hasn't changed
         assert_eq!(format!("{ts:}"), expected);
diff --git a/storage/src/http/bucket_access_controls/mod.rs b/storage/src/http/bucket_access_controls/mod.rs
index 9830d9b7..ecafd0ec 100644
--- a/storage/src/http/bucket_access_controls/mod.rs
+++ b/storage/src/http/bucket_access_controls/mod.rs
@@ -42,12 +42,12 @@ pub struct BucketAccessControl {
     /// * `project-{team-projectid}`
     /// * `allUsers`
     /// * `allAuthenticatedUsers`
-    /// Examples:
+    ///   Examples:
     /// * The user `liz@example.com` would be `user-liz@example.com`.
     /// * The group `example@googlegroups.com` would be
-    /// `group-example@googlegroups.com`
+    ///   `group-example@googlegroups.com`
     /// * All members of the Google Apps for Business domain `example.com` would be
-    /// `domain-example.com`
+    ///   `domain-example.com`
     pub entity: String,
     /// The ID for the entity, if any.
     pub entity_id: Option,
diff --git a/storage/src/http/object_access_controls/list.rs b/storage/src/http/object_access_controls/list.rs
index 66144892..8867a4d1 100644
--- a/storage/src/http/object_access_controls/list.rs
+++ b/storage/src/http/object_access_controls/list.rs
@@ -14,7 +14,7 @@ pub struct ListObjectAccessControlsRequest {
     /// * `group-`*emailAddress*
     /// * `allUsers`
    /// * `allAuthenticatedUsers`
-    /// Required. Name of the object.
+    ///   Required. Name of the object.
     #[serde(skip_serializing)]
     pub object: String,
    /// If present, selects a specific revision of this object (as opposed to the
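
Background on the lint behind this patch: rustdoc interprets doc comments as Markdown, and a wrapped line that follows a list item without extra indentation is a "lazy continuation"; recent clippy releases (around Rust 1.80) warn about this via `clippy::doc_lazy_continuation`. The usual fix, and the shape of the doc-comment hunks above, is to indent the wrapped text so it visibly belongs to the bullet. A minimal sketch with made-up names, not code from this repository:

    /// Illustrative only: `Sketch`, `before` and `after` are hypothetical.
    pub struct Sketch {
        /// Supported values:
        /// * `NUMERIC` - exact decimal values
        /// with fixed precision (unindented wrap; clippy reads this as a lazy
        /// continuation of the bullet and warns)
        pub before: Option<String>,

        /// Supported values:
        /// * `NUMERIC` - exact decimal values
        ///   with fixed precision (indented wrap; clearly part of the bullet,
        ///   so the lint is satisfied and rustdoc renders one list item)
        pub after: Option<String>,
    }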
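
The only non-doc hunk, in spanner/src/statement.rs, swaps the zero-prefixed literals `01` for `1`. A leading zero does not change a Rust integer literal (there is no implicit octal), but it does trip clippy's `zero_prefixed_literal` lint, which is presumably why it was cleaned up alongside the doc fixes. A small sketch assuming the same prost-types `Timestamp::date_time` constructor the existing test uses:

    fn main() {
        // A leading zero does not change the value of a decimal literal.
        assert_eq!(01, 1);
        // Same construction as the test in the patch, with plain literals.
        let ts = prost_types::Timestamp::date_time(2024, 1, 1, 12, 15, 36).unwrap();
        assert_eq!(format!("{ts}"), "2024-01-01T12:15:36Z");
    }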