From cd711425d2c9f23668d9436239ec2ec7fea5652c Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Thu, 18 Jan 2024 19:13:36 +0000 Subject: [PATCH] Regenerated Clients --- .../11bdec22ee6043ca8daf5be3aad2724f.json | 8 + .../2fe880b9a393473a88e4a86a74693c7f.json | 8 + .../892215bcd9d042e59875098201fb1f0a.json | 8 + .../a98185d3d5bb43c69b9e6c99e377a0ba.json | 8 + .../b00461b70e994a1a831e987d3689de26.json | 8 + .../bf49d54df6a44cdd8f510f53feedb832.json | 8 + service/b2bi/api_op_TestParsing.go | 3 +- service/b2bi/deserializers.go | 9 + service/cloudtrail/api_op_CreateTrail.go | 3 +- .../cloudtrail/api_op_DisableFederation.go | 9 +- service/cloudtrail/api_op_EnableFederation.go | 14 +- .../api_op_ListInsightsMetricData.go | 322 ++++++++++++++++++ .../cloudtrail/api_op_UpdateEventDataStore.go | 8 +- service/cloudtrail/api_op_UpdateTrail.go | 3 +- service/cloudtrail/deserializers.go | 309 +++++++++++++++++ service/cloudtrail/generated.json | 1 + service/cloudtrail/serializers.go | 112 ++++++ service/cloudtrail/types/enums.go | 18 + service/cloudtrail/types/types.go | 142 +++++--- service/cloudtrail/validators.go | 45 +++ .../connect/api_op_GetCurrentMetricData.go | 8 +- service/connect/api_op_GetMetricDataV2.go | 97 +++--- .../connect/api_op_GetTrafficDistribution.go | 4 +- service/drs/api_op_DescribeJobLogItems.go | 12 +- service/drs/api_op_DescribeJobs.go | 12 +- ...op_DescribeLaunchConfigurationTemplates.go | 12 +- .../drs/api_op_DescribeRecoveryInstances.go | 12 +- .../drs/api_op_DescribeRecoverySnapshots.go | 12 +- ...scribeReplicationConfigurationTemplates.go | 12 +- service/drs/api_op_DescribeSourceNetworks.go | 12 +- service/drs/api_op_DescribeSourceServers.go | 12 +- .../drs/api_op_ListExtensibleSourceServers.go | 12 +- service/drs/api_op_ListLaunchActions.go | 12 +- service/drs/api_op_PutLaunchAction.go | 4 +- service/drs/deserializers.go | 8 +- service/drs/serializers.go | 52 +-- service/drs/types/types.go | 6 +- service/drs/validators.go | 9 
+ .../firehose/api_op_CreateDeliveryStream.go | 3 + service/firehose/api_op_UpdateDestination.go | 3 + service/firehose/deserializers.go | 289 ++++++++++++++++ service/firehose/serializers.go | 266 +++++++++++++++ service/firehose/types/enums.go | 38 +++ service/firehose/types/types.go | 291 ++++++++++++++++ service/firehose/validators.go | 97 ++++++ .../api_op_DeleteRecord.go | 24 +- service/sso/internal/endpoints/endpoints.go | 8 + 47 files changed, 2176 insertions(+), 197 deletions(-) create mode 100644 .changelog/11bdec22ee6043ca8daf5be3aad2724f.json create mode 100644 .changelog/2fe880b9a393473a88e4a86a74693c7f.json create mode 100644 .changelog/892215bcd9d042e59875098201fb1f0a.json create mode 100644 .changelog/a98185d3d5bb43c69b9e6c99e377a0ba.json create mode 100644 .changelog/b00461b70e994a1a831e987d3689de26.json create mode 100644 .changelog/bf49d54df6a44cdd8f510f53feedb832.json create mode 100644 service/cloudtrail/api_op_ListInsightsMetricData.go diff --git a/.changelog/11bdec22ee6043ca8daf5be3aad2724f.json b/.changelog/11bdec22ee6043ca8daf5be3aad2724f.json new file mode 100644 index 00000000000..3a31f749b03 --- /dev/null +++ b/.changelog/11bdec22ee6043ca8daf5be3aad2724f.json @@ -0,0 +1,8 @@ +{ + "id": "11bdec22-ee60-43ca-8daf-5be3aad2724f", + "type": "feature", + "description": "Increase BatchGetRecord limits from 10 items to 100 items", + "modules": [ + "service/sagemakerfeaturestoreruntime" + ] +} \ No newline at end of file diff --git a/.changelog/2fe880b9a393473a88e4a86a74693c7f.json b/.changelog/2fe880b9a393473a88e4a86a74693c7f.json new file mode 100644 index 00000000000..a25b7c65cf2 --- /dev/null +++ b/.changelog/2fe880b9a393473a88e4a86a74693c7f.json @@ -0,0 +1,8 @@ +{ + "id": "2fe880b9-a393-473a-88e4-a86a74693c7f", + "type": "feature", + "description": "Allow support for Snowflake as a Kinesis Data Firehose delivery destination.", + "modules": [ + "service/firehose" + ] +} \ No newline at end of file diff --git 
a/.changelog/892215bcd9d042e59875098201fb1f0a.json b/.changelog/892215bcd9d042e59875098201fb1f0a.json new file mode 100644 index 00000000000..d7089270fcd --- /dev/null +++ b/.changelog/892215bcd9d042e59875098201fb1f0a.json @@ -0,0 +1,8 @@ +{ + "id": "892215bc-d9d0-42e5-9875-098201fb1f0a", + "type": "feature", + "description": "Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. This release also includes exposing InternalServerException for Tag APIs.", + "modules": [ + "service/b2bi" + ] +} \ No newline at end of file diff --git a/.changelog/a98185d3d5bb43c69b9e6c99e377a0ba.json b/.changelog/a98185d3d5bb43c69b9e6c99e377a0ba.json new file mode 100644 index 00000000000..9d1787b00a1 --- /dev/null +++ b/.changelog/a98185d3d5bb43c69b9e6c99e377a0ba.json @@ -0,0 +1,8 @@ +{ + "id": "a98185d3-d5bb-43c6-9b9e-6c99e377a0ba", + "type": "feature", + "description": "This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights.", + "modules": [ + "service/cloudtrail" + ] +} \ No newline at end of file diff --git a/.changelog/b00461b70e994a1a831e987d3689de26.json b/.changelog/b00461b70e994a1a831e987d3689de26.json new file mode 100644 index 00000000000..5db4297513f --- /dev/null +++ b/.changelog/b00461b70e994a1a831e987d3689de26.json @@ -0,0 +1,8 @@ +{ + "id": "b00461b7-0e99-4a1a-831e-987d3689de26", + "type": "feature", + "description": "Removed invalid and unnecessary default values.", + "modules": [ + "service/drs" + ] +} \ No newline at end of file diff --git a/.changelog/bf49d54df6a44cdd8f510f53feedb832.json b/.changelog/bf49d54df6a44cdd8f510f53feedb832.json new file mode 100644 index 00000000000..3496cdd4507 --- /dev/null +++ b/.changelog/bf49d54df6a44cdd8f510f53feedb832.json @@ -0,0 +1,8 @@ +{ + "id": "bf49d54d-f6a4-4cdd-8f51-0f53feedb832", + "type": "feature", + "description": "GetMetricDataV2 now supports 3 groupings", + "modules": [ + "service/connect" + ] +} \ No 
newline at end of file diff --git a/service/b2bi/api_op_TestParsing.go b/service/b2bi/api_op_TestParsing.go index f894b1183c1..46a73b016e4 100644 --- a/service/b2bi/api_op_TestParsing.go +++ b/service/b2bi/api_op_TestParsing.go @@ -12,7 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Parses the input EDI (electronic data interchange) file. +// Parses the input EDI (electronic data interchange) file. The input file has a +// file size limit of 250 KB. func (c *Client) TestParsing(ctx context.Context, params *TestParsingInput, optFns ...func(*Options)) (*TestParsingOutput, error) { if params == nil { params = &TestParsingInput{} diff --git a/service/b2bi/deserializers.go b/service/b2bi/deserializers.go index 138fc7558da..5d66a84045c 100644 --- a/service/b2bi/deserializers.go +++ b/service/b2bi/deserializers.go @@ -2004,6 +2004,9 @@ func awsAwsjson10_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) @@ -2327,6 +2330,9 @@ func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, m } switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) @@ -2668,6 +2674,9 @@ func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, } switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): 
return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) diff --git a/service/cloudtrail/api_op_CreateTrail.go b/service/cloudtrail/api_op_CreateTrail.go index 3c5157433a3..b5f3a6ac407 100644 --- a/service/cloudtrail/api_op_CreateTrail.go +++ b/service/cloudtrail/api_op_CreateTrail.go @@ -54,8 +54,7 @@ type CreateTrailInput struct { // Specifies a log group name using an Amazon Resource Name (ARN), a unique // identifier that represents the log group to which CloudTrail logs will be // delivered. You must use a log group that exists in your account. Not required - // unless you specify CloudWatchLogsRoleArn . Only the management account can - // configure a CloudWatch Logs log group for an organization trail. + // unless you specify CloudWatchLogsRoleArn . CloudWatchLogsLogGroupArn *string // Specifies the role for the CloudWatch Logs endpoint to assume to write to a diff --git a/service/cloudtrail/api_op_DisableFederation.go b/service/cloudtrail/api_op_DisableFederation.go index e0663ec0a5d..7f0ec8ea5fd 100644 --- a/service/cloudtrail/api_op_DisableFederation.go +++ b/service/cloudtrail/api_op_DisableFederation.go @@ -13,10 +13,11 @@ import ( ) // Disables Lake query federation on the specified event data store. When you -// disable federation, CloudTrail removes the metadata associated with the -// federated event data store in the Glue Data Catalog and removes registration for -// the federation role ARN and event data store in Lake Formation. No CloudTrail -// Lake data is deleted when you disable federation. +// disable federation, CloudTrail disables the integration with Glue, Lake +// Formation, and Amazon Athena. After disabling Lake query federation, you can no +// longer query your event data in Amazon Athena. No CloudTrail Lake data is +// deleted when you disable federation and you can continue to run queries in +// CloudTrail Lake. 
func (c *Client) DisableFederation(ctx context.Context, params *DisableFederationInput, optFns ...func(*Options)) (*DisableFederationOutput, error) { if params == nil { params = &DisableFederationInput{} diff --git a/service/cloudtrail/api_op_EnableFederation.go b/service/cloudtrail/api_op_EnableFederation.go index 604deff07ab..9813e61fa43 100644 --- a/service/cloudtrail/api_op_EnableFederation.go +++ b/service/cloudtrail/api_op_EnableFederation.go @@ -18,13 +18,13 @@ import ( // and run SQL queries against your event data using Amazon Athena. The table // metadata stored in the Glue Data Catalog lets the Athena query engine know how // to find, read, and process the data that you want to query. When you enable Lake -// query federation, CloudTrail creates a federated database named aws:cloudtrail -// (if the database doesn't already exist) and a federated table in the Glue Data -// Catalog. The event data store ID is used for the table name. CloudTrail -// registers the role ARN and event data store in Lake Formation (https://docs.aws.amazon.com/lake-formation/latest/dg/how-it-works.html) -// , the service responsible for revoking or granting permissions to the federated -// resources in the Glue Data Catalog. For more information about Lake query -// federation, see Federate an event data store (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-federation.html) +// query federation, CloudTrail creates a managed database named aws:cloudtrail +// (if the database doesn't already exist) and a managed federated table in the +// Glue Data Catalog. The event data store ID is used for the table name. +// CloudTrail registers the role ARN and event data store in Lake Formation (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-federation-lake-formation.html) +// , the service responsible for allowing fine-grained access control of the +// federated resources in the Glue Data Catalog. 
For more information about Lake +// query federation, see Federate an event data store (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-federation.html) // . func (c *Client) EnableFederation(ctx context.Context, params *EnableFederationInput, optFns ...func(*Options)) (*EnableFederationOutput, error) { if params == nil { diff --git a/service/cloudtrail/api_op_ListInsightsMetricData.go b/service/cloudtrail/api_op_ListInsightsMetricData.go new file mode 100644 index 00000000000..b1b19cb8514 --- /dev/null +++ b/service/cloudtrail/api_op_ListInsightsMetricData.go @@ -0,0 +1,322 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package cloudtrail + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns Insights metrics data for trails that have enabled Insights. The +// request must include the EventSource , EventName , and InsightType parameters. +// If the InsightType is set to ApiErrorRateInsight , the request must also include +// the ErrorCode parameter. The following are the available time periods for +// ListInsightsMetricData . Each cutoff is inclusive. +// - Data points with a period of 60 seconds (1-minute) are available for 15 +// days. +// - Data points with a period of 300 seconds (5-minute) are available for 63 +// days. +// - Data points with a period of 3600 seconds (1 hour) are available for 90 +// days. +// +// Access to the ListInsightsMetricData API operation is linked to the +// cloudtrail:LookupEvents action. To use this operation, you must have permissions +// to perform the cloudtrail:LookupEvents action. 
+func (c *Client) ListInsightsMetricData(ctx context.Context, params *ListInsightsMetricDataInput, optFns ...func(*Options)) (*ListInsightsMetricDataOutput, error) { + if params == nil { + params = &ListInsightsMetricDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListInsightsMetricData", params, optFns, c.addOperationListInsightsMetricDataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListInsightsMetricDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListInsightsMetricDataInput struct { + + // The name of the event, typically the Amazon Web Services API on which unusual + // levels of activity were recorded. + // + // This member is required. + EventName *string + + // The Amazon Web Services service to which the request was made, such as + // iam.amazonaws.com or s3.amazonaws.com . + // + // This member is required. + EventSource *string + + // The type of CloudTrail Insights event, which is either ApiCallRateInsight or + // ApiErrorRateInsight . The ApiCallRateInsight Insights type analyzes write-only + // management API calls that are aggregated per minute against a baseline API call + // volume. The ApiErrorRateInsight Insights type analyzes management API calls + // that result in error codes. + // + // This member is required. + InsightType types.InsightType + + // Type of datapoints to return. Valid values are NonZeroData and FillWithZeros . + // The default is NonZeroData . + DataType types.InsightsMetricDataType + + // Specifies, in UTC, the end time for time-series data. The value specified is + // exclusive; results include data points up to the specified time stamp. The + // default is the time of request. + EndTime *time.Time + + // Conditionally required if the InsightType parameter is set to + // ApiErrorRateInsight . If returning metrics for the ApiErrorRateInsight Insights + // type, this is the error to retrieve data for. For example, AccessDenied . 
+ ErrorCode *string + + // The maximum number of datapoints to return. Valid values are integers from 1 to + // 21600. The default value is 21600. + MaxResults *int32 + + // Returned if all datapoints can't be returned in a single call. For example, due + // to reaching MaxResults . Add this parameter to the request to continue + // retrieving results starting from the last evaluated point. + NextToken *string + + // Granularity of data to retrieve, in seconds. Valid values are 60 , 300 , and + // 3600 . If you specify any other value, you will get an error. The default is + // 3600 seconds. + Period *int32 + + // Specifies, in UTC, the start time for time-series data. The value specified is + // inclusive; results include data points with the specified time stamp. The + // default is 90 days before the time of request. + StartTime *time.Time + + noSmithyDocumentSerde +} + +type ListInsightsMetricDataOutput struct { + + // Only returned if InsightType parameter was set to ApiErrorRateInsight . If + // returning metrics for the ApiErrorRateInsight Insights type, this is the error + // to retrieve data for. For example, AccessDenied . + ErrorCode *string + + // The name of the event, typically the Amazon Web Services API on which unusual + // levels of activity were recorded. + EventName *string + + // The Amazon Web Services service to which the request was made, such as + // iam.amazonaws.com or s3.amazonaws.com . + EventSource *string + + // The type of CloudTrail Insights event, which is either ApiCallRateInsight or + // ApiErrorRateInsight . The ApiCallRateInsight Insights type analyzes write-only + // management API calls that are aggregated per minute against a baseline API call + // volume. The ApiErrorRateInsight Insights type analyzes management API calls + // that result in error codes. + InsightType types.InsightType + + // Only returned if the full results could not be returned in a single query. 
You + // can set the NextToken parameter in the next request to this value to continue + // retrieval. + NextToken *string + + // List of timestamps at intervals corresponding to the specified time period. + Timestamps []time.Time + + // List of values representing the API call rate or error rate at each timestamp. + // The number of values is equal to the number of timestamps. + Values []float64 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListInsightsMetricDataMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListInsightsMetricData{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListInsightsMetricData{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListInsightsMetricData"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = 
addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpListInsightsMetricDataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListInsightsMetricData(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListInsightsMetricDataAPIClient is a client that implements the +// ListInsightsMetricData operation. +type ListInsightsMetricDataAPIClient interface { + ListInsightsMetricData(context.Context, *ListInsightsMetricDataInput, ...func(*Options)) (*ListInsightsMetricDataOutput, error) +} + +var _ ListInsightsMetricDataAPIClient = (*Client)(nil) + +// ListInsightsMetricDataPaginatorOptions is the paginator options for +// ListInsightsMetricData +type ListInsightsMetricDataPaginatorOptions struct { + // The maximum number of datapoints to return. Valid values are integers from 1 to + // 21600. The default value is 21600. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListInsightsMetricDataPaginator is a paginator for ListInsightsMetricData +type ListInsightsMetricDataPaginator struct { + options ListInsightsMetricDataPaginatorOptions + client ListInsightsMetricDataAPIClient + params *ListInsightsMetricDataInput + nextToken *string + firstPage bool +} + +// NewListInsightsMetricDataPaginator returns a new ListInsightsMetricDataPaginator +func NewListInsightsMetricDataPaginator(client ListInsightsMetricDataAPIClient, params *ListInsightsMetricDataInput, optFns ...func(*ListInsightsMetricDataPaginatorOptions)) *ListInsightsMetricDataPaginator { + if params == nil { + params = &ListInsightsMetricDataInput{} + } + + options := ListInsightsMetricDataPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListInsightsMetricDataPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListInsightsMetricDataPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListInsightsMetricData page. +func (p *ListInsightsMetricDataPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListInsightsMetricDataOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListInsightsMetricData(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListInsightsMetricData(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListInsightsMetricData", + } +} diff --git a/service/cloudtrail/api_op_UpdateEventDataStore.go b/service/cloudtrail/api_op_UpdateEventDataStore.go index 293cedba35d..42d0ea8e427 100644 --- a/service/cloudtrail/api_op_UpdateEventDataStore.go +++ b/service/cloudtrail/api_op_UpdateEventDataStore.go @@ -20,11 +20,11 @@ import ( // set to EXTENDABLE_RETENTION_PRICING , or between 7 and 2557 if BillingMode is // set to FIXED_RETENTION_PRICING . By default, TerminationProtection is enabled. // For event data stores for CloudTrail events, AdvancedEventSelectors includes or -// excludes management, data, or Insights events in your event data store. For more +// excludes management or data events in your event data store. For more // information about AdvancedEventSelectors , see AdvancedEventSelectors (https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedEventSelector.html) -// . For event data stores for Config configuration items, Audit Manager evidence, -// or non-Amazon Web Services events, AdvancedEventSelectors includes events of -// that type in your event data store. +// . For event data stores for CloudTrail Insights events, Config configuration +// items, Audit Manager evidence, or non-Amazon Web Services events, +// AdvancedEventSelectors includes events of that type in your event data store. 
func (c *Client) UpdateEventDataStore(ctx context.Context, params *UpdateEventDataStoreInput, optFns ...func(*Options)) (*UpdateEventDataStoreOutput, error) { if params == nil { params = &UpdateEventDataStoreInput{} diff --git a/service/cloudtrail/api_op_UpdateTrail.go b/service/cloudtrail/api_op_UpdateTrail.go index db893be6535..8d479566725 100644 --- a/service/cloudtrail/api_op_UpdateTrail.go +++ b/service/cloudtrail/api_op_UpdateTrail.go @@ -53,8 +53,7 @@ type UpdateTrailInput struct { // Specifies a log group name using an Amazon Resource Name (ARN), a unique // identifier that represents the log group to which CloudTrail logs are delivered. // You must use a log group that exists in your account. Not required unless you - // specify CloudWatchLogsRoleArn . Only the management account can configure a - // CloudWatch Logs log group for an organization trail. + // specify CloudWatchLogsRoleArn . CloudWatchLogsLogGroupArn *string // Specifies the role for the CloudWatch Logs endpoint to assume to write to a diff --git a/service/cloudtrail/deserializers.go b/service/cloudtrail/deserializers.go index ec859e5a7ca..ee8ab73a6ad 100644 --- a/service/cloudtrail/deserializers.go +++ b/service/cloudtrail/deserializers.go @@ -17,7 +17,9 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" + "math" "strings" + "time" ) type awsAwsjson11_deserializeOpAddTags struct { @@ -3680,6 +3682,123 @@ func awsAwsjson11_deserializeOpErrorListImports(response *smithyhttp.Response, m } } +type awsAwsjson11_deserializeOpListInsightsMetricData struct { +} + +func (*awsAwsjson11_deserializeOpListInsightsMetricData) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListInsightsMetricData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err 
= next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListInsightsMetricData(response, &metadata) + } + output := &ListInsightsMetricDataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListInsightsMetricDataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListInsightsMetricData(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer 
:= smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("OperationNotPermittedException", errorCode): + return awsAwsjson11_deserializeErrorOperationNotPermittedException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson11_deserializeOpListPublicKeys struct { } @@ -12228,6 +12347,67 @@ func awsAwsjson11_deserializeDocumentInsightSelectors(v *[]types.InsightSelector return nil } +func awsAwsjson11_deserializeDocumentInsightsMetricValues(v *[]float64, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []float64 + if *v == nil { + cv = []float64{} + } else { + cv = *v + } + + for _, value := range shape { + var col float64 + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + 
return err + } + col = f64 + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + col = f64 + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentInsufficientDependencyServiceAccessPermissionException(v **types.InsufficientDependencyServiceAccessPermissionException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15099,6 +15279,49 @@ func awsAwsjson11_deserializeDocumentThrottlingException(v **types.ThrottlingExc return nil } +func awsAwsjson11_deserializeDocumentTimestamps(v *[]time.Time, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []time.Time + if *v == nil { + cv = []time.Time{} + } else { + cv = *v + } + + for _, value := range shape { + var col time.Time + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + col = smithytime.ParseEpochSeconds(f64) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentTrail(v **types.Trail, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -17446,6 +17669,92 @@ func awsAwsjson11_deserializeOpDocumentListImportsOutput(v **ListImportsOutput, return nil } +func 
awsAwsjson11_deserializeOpDocumentListInsightsMetricDataOutput(v **ListInsightsMetricDataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListInsightsMetricDataOutput + if *v == nil { + sv = &ListInsightsMetricDataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.ErrorCode = ptr.String(jtv) + } + + case "EventName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventName to be of type string, got %T instead", value) + } + sv.EventName = ptr.String(jtv) + } + + case "EventSource": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventSource to be of type string, got %T instead", value) + } + sv.EventSource = ptr.String(jtv) + } + + case "InsightType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InsightType to be of type string, got %T instead", value) + } + sv.InsightType = types.InsightType(jtv) + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InsightsMetricNextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Timestamps": + if err := awsAwsjson11_deserializeDocumentTimestamps(&sv.Timestamps, value); err != nil { + return err + } + + case "Values": + if err := awsAwsjson11_deserializeDocumentInsightsMetricValues(&sv.Values, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentListPublicKeysOutput(v 
**ListPublicKeysOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/cloudtrail/generated.json b/service/cloudtrail/generated.json index cd4b82b762a..5d99ae5001f 100644 --- a/service/cloudtrail/generated.json +++ b/service/cloudtrail/generated.json @@ -36,6 +36,7 @@ "api_op_ListEventDataStores.go", "api_op_ListImportFailures.go", "api_op_ListImports.go", + "api_op_ListInsightsMetricData.go", "api_op_ListPublicKeys.go", "api_op_ListQueries.go", "api_op_ListTags.go", diff --git a/service/cloudtrail/serializers.go b/service/cloudtrail/serializers.go index f9746e098de..89ace4c0774 100644 --- a/service/cloudtrail/serializers.go +++ b/service/cloudtrail/serializers.go @@ -1501,6 +1501,61 @@ func (m *awsAwsjson11_serializeOpListImports) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpListInsightsMetricData struct { +} + +func (*awsAwsjson11_serializeOpListInsightsMetricData) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListInsightsMetricData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListInsightsMetricDataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + 
} + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("CloudTrail_20131101.ListInsightsMetricData") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListInsightsMetricDataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpListPublicKeys struct { } @@ -3596,6 +3651,63 @@ func awsAwsjson11_serializeOpDocumentListImportsInput(v *ListImportsInput, value return nil } +func awsAwsjson11_serializeOpDocumentListInsightsMetricDataInput(v *ListInsightsMetricDataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.DataType) > 0 { + ok := object.Key("DataType") + ok.String(string(v.DataType)) + } + + if v.EndTime != nil { + ok := object.Key("EndTime") + ok.Double(smithytime.FormatEpochSeconds(*v.EndTime)) + } + + if v.ErrorCode != nil { + ok := object.Key("ErrorCode") + ok.String(*v.ErrorCode) + } + + if v.EventName != nil { + ok := object.Key("EventName") + ok.String(*v.EventName) + } + + if v.EventSource != nil { + ok := object.Key("EventSource") + ok.String(*v.EventSource) + } + + if len(v.InsightType) > 0 { + ok := object.Key("InsightType") + ok.String(string(v.InsightType)) + } + + if v.MaxResults != nil { + ok := 
object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.Period != nil { + ok := object.Key("Period") + ok.Integer(*v.Period) + } + + if v.StartTime != nil { + ok := object.Key("StartTime") + ok.Double(smithytime.FormatEpochSeconds(*v.StartTime)) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentListPublicKeysInput(v *ListPublicKeysInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/cloudtrail/types/enums.go b/service/cloudtrail/types/enums.go index 26af3c4c99a..3befd99d8b6 100644 --- a/service/cloudtrail/types/enums.go +++ b/service/cloudtrail/types/enums.go @@ -178,6 +178,24 @@ func (ImportStatus) Values() []ImportStatus { } } +type InsightsMetricDataType string + +// Enum values for InsightsMetricDataType +const ( + InsightsMetricDataTypeFillWithZeros InsightsMetricDataType = "FillWithZeros" + InsightsMetricDataTypeNonZeroData InsightsMetricDataType = "NonZeroData" +) + +// Values returns all known values for InsightsMetricDataType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InsightsMetricDataType) Values() []InsightsMetricDataType { + return []InsightsMetricDataType{ + "FillWithZeros", + "NonZeroData", + } +} + type InsightType string // Enum values for InsightType diff --git a/service/cloudtrail/types/types.go b/service/cloudtrail/types/types.go index 739c03340b0..f048d50e9b4 100644 --- a/service/cloudtrail/types/types.go +++ b/service/cloudtrail/types/types.go @@ -38,13 +38,15 @@ type AdvancedEventSelector struct { type AdvancedFieldSelector struct { // A field in a CloudTrail event record on which to filter events to be logged. 
- // For event data stores for Config configuration items, Audit Manager evidence, or - // non-Amazon Web Services events, the field is used only for selecting events as - // filtering is not supported. For CloudTrail event records, supported fields - // include readOnly , eventCategory , eventSource (for management events), - // eventName , resources.type , and resources.ARN . For event data stores for - // Config configuration items, Audit Manager evidence, or non-Amazon Web Services - // events, the only supported field is eventCategory . + // For event data stores for CloudTrail Insights events, Config configuration + // items, Audit Manager evidence, or events outside of Amazon Web Services, the + // field is used only for selecting events as filtering is not supported. For + // CloudTrail management events, supported fields include readOnly , eventCategory + // , and eventSource . For CloudTrail data events, supported fields include + // readOnly , eventCategory , eventName , resources.type , and resources.ARN . For + // event data stores for CloudTrail Insights events, Config configuration items, + // Audit Manager evidence, or events outside of Amazon Web Services, the only + // supported field is eventCategory . // - readOnly - Optional. Can be set to Equals a value of true or false . If you // do not add this field, CloudTrail logs both read and write events. A value of // true logs only read events. A value of false logs only write events. @@ -54,8 +56,10 @@ type AdvancedFieldSelector struct { // any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock . // You can have multiple values for this field, separated by commas. // - eventCategory - This is required and must be set to Equals . - // - For CloudTrail event records, the value must be Management or Data . - // - For CloudTrail Insights event records, the value must be Insight . + // - For CloudTrail management events, the value must be Management . 
+ // - For CloudTrail data events, the value must be Data . The following are used + // only for event data stores: + // - For CloudTrail Insights events, the value must be Insight . // - For Config configuration items, the value must be ConfigurationItem . // - For Audit Manager evidence, the value must be Evidence . // - For non-Amazon Web Services events, the value must be ActivityAuditLog . @@ -65,6 +69,11 @@ type AdvancedFieldSelector struct { // - AWS::DynamoDB::Table // - AWS::Lambda::Function // - AWS::S3::Object + // - AWS::B2BI::Transformer + // - AWS::Bedrock::AgentAlias + // - AWS::Bedrock::KnowledgeBase + // - AWS::Cassandra::Table + // - AWS::CloudFront::KeyValueStore // - AWS::CloudTrail::Channel // - AWS::CodeWhisperer::Customization // - AWS::CodeWhisperer::Profile @@ -75,21 +84,35 @@ type AdvancedFieldSelector struct { // - AWS::FinSpace::Environment // - AWS::Glue::Table // - AWS::GuardDuty::Detector + // - AWS::IoTTwinMaker::Entity + // - AWS::IoTTwinMaker::Workspace // - AWS::KendraRanking::ExecutionPlan // - AWS::KinesisVideo::Stream // - AWS::ManagedBlockchain::Network // - AWS::ManagedBlockchain::Node // - AWS::MedicalImaging::Datastore + // - AWS::NeptuneGraph::Graph // - AWS::PCAConnectorAD::Connector + // - AWS::QBusiness::Application + // - AWS::QBusiness::DataSource + // - AWS::QBusiness::Index + // - AWS::QBusiness::WebExperience + // - AWS::RDS::DBCluster // - AWS::SageMaker::Endpoint // - AWS::SageMaker::ExperimentTrialComponent // - AWS::SageMaker::FeatureGroup + // - AWS::ServiceDiscovery::Namespace + // - AWS::ServiceDiscovery::Service + // - AWS::SCN::Instance // - AWS::SNS::PlatformEndpoint // - AWS::SNS::Topic + // - AWS::SQS::Queue // - AWS::S3::AccessPoint // - AWS::S3ObjectLambda::AccessPoint // - AWS::S3Outposts::Object // - AWS::SSMMessages::ControlChannel + // - AWS::ThinClient::Device + // - AWS::ThinClient::Environment // - AWS::Timestream::Database // - AWS::Timestream::Table // - AWS::VerifiedPermissions::PolicyStore 
You can have only one resources.type @@ -111,9 +134,24 @@ type AdvancedFieldSelector struct { // - arn::dynamodb:::table/ When resources.type equals AWS::Lambda::Function , // and the operator is set to Equals or NotEquals , the ARN must be in the // following format: - // - arn::lambda:::function: When resources.type equals AWS::CloudTrail::Channel + // - arn::lambda:::function: When resources.type equals AWS::B2BI::Transformer , + // and the operator is set to Equals or NotEquals , the ARN must be in the + // following format: + // - arn::b2bi:::transformer/ When resources.type equals AWS::Bedrock::AgentAlias // , and the operator is set to Equals or NotEquals , the ARN must be in the // following format: + // - arn::bedrock:::agent-alias// When resources.type equals + // AWS::Bedrock::KnowledgeBase , and the operator is set to Equals or NotEquals , + // the ARN must be in the following format: + // - arn::bedrock:::knowledge-base/ When resources.type equals + // AWS::Cassandra::Table , and the operator is set to Equals or NotEquals , the + // ARN must be in the following format: + // - arn::cassandra:::/keyspace//table/ When resources.type equals + // AWS::CloudFront::KeyValueStore , and the operator is set to Equals or + // NotEquals , the ARN must be in the following format: + // - arn::cloudfront:::key-value-store/ When resources.type equals + // AWS::CloudTrail::Channel , and the operator is set to Equals or NotEquals , + // the ARN must be in the following format: // - arn::cloudtrail:::channel/ When resources.type equals // AWS::CodeWhisperer::Customization , and the operator is set to Equals or // NotEquals , the ARN must be in the following format: @@ -142,12 +180,18 @@ type AdvancedFieldSelector struct { // and the operator is set to Equals or NotEquals , the ARN must be in the // following format: // - arn::guardduty:::detector/ When resources.type equals + // AWS::IoTTwinMaker::Entity , and the operator is set to Equals or NotEquals , + // the ARN 
must be in the following format: + // - arn::iottwinmaker:::workspace//entity/ When resources.type equals + // AWS::IoTTwinMaker::Workspace , and the operator is set to Equals or NotEquals + // , the ARN must be in the following format: + // - arn::iottwinmaker:::workspace/ When resources.type equals // AWS::KendraRanking::ExecutionPlan , and the operator is set to Equals or // NotEquals , the ARN must be in the following format: // - arn::kendra-ranking:::rescore-execution-plan/ When resources.type equals // AWS::KinesisVideo::Stream , and the operator is set to Equals or NotEquals , // the ARN must be in the following format: - // - arn::kinesisvideo:::stream/ When resources.type equals + // - arn::kinesisvideo:::stream// When resources.type equals // AWS::ManagedBlockchain::Network , and the operator is set to Equals or // NotEquals , the ARN must be in the following format: // - arn::managedblockchain:::networks/ When resources.type equals @@ -157,11 +201,29 @@ type AdvancedFieldSelector struct { // AWS::MedicalImaging::Datastore , and the operator is set to Equals or // NotEquals , the ARN must be in the following format: // - arn::medical-imaging:::datastore/ When resources.type equals + // AWS::NeptuneGraph::Graph , and the operator is set to Equals or NotEquals , + // the ARN must be in the following format: + // - arn::neptune-graph:::graph/ When resources.type equals // AWS::PCAConnectorAD::Connector , and the operator is set to Equals or // NotEquals , the ARN must be in the following format: // - arn::pca-connector-ad:::connector/ When resources.type equals - // AWS::SageMaker::Endpoint , and the operator is set to Equals or NotEquals , + // AWS::QBusiness::Application , and the operator is set to Equals or NotEquals , // the ARN must be in the following format: + // - arn::qbusiness:::application/ When resources.type equals + // AWS::QBusiness::DataSource , and the operator is set to Equals or NotEquals , + // the ARN must be in the following format: + 
// - arn::qbusiness:::application//index//data-source/ When resources.type equals + // AWS::QBusiness::Index , and the operator is set to Equals or NotEquals , the + // ARN must be in the following format: + // - arn::qbusiness:::application//index/ When resources.type equals + // AWS::QBusiness::WebExperience , and the operator is set to Equals or NotEquals + // , the ARN must be in the following format: + // - arn::qbusiness:::application//web-experience/ When resources.type equals + // AWS::RDS::DBCluster , and the operator is set to Equals or NotEquals , the ARN + // must be in the following format: + // - arn::rds:::cluster/ When resources.type equals AWS::SageMaker::Endpoint , + // and the operator is set to Equals or NotEquals , the ARN must be in the + // following format: // - arn::sagemaker:::endpoint/ When resources.type equals // AWS::SageMaker::ExperimentTrialComponent , and the operator is set to Equals // or NotEquals , the ARN must be in the following format: @@ -169,12 +231,23 @@ type AdvancedFieldSelector struct { // AWS::SageMaker::FeatureGroup , and the operator is set to Equals or NotEquals // , the ARN must be in the following format: // - arn::sagemaker:::feature-group/ When resources.type equals + // AWS::SCN::Instance , and the operator is set to Equals or NotEquals , the ARN + // must be in the following format: + // - arn::scn:::instance/ When resources.type equals + // AWS::ServiceDiscovery::Namespace , and the operator is set to Equals or + // NotEquals , the ARN must be in the following format: + // - arn::servicediscovery:::namespace/ When resources.type equals + // AWS::ServiceDiscovery::Service , and the operator is set to Equals or + // NotEquals , the ARN must be in the following format: + // - arn::servicediscovery:::service/ When resources.type equals // AWS::SNS::PlatformEndpoint , and the operator is set to Equals or NotEquals , // the ARN must be in the following format: // - arn::sns:::endpoint/// When resources.type equals 
AWS::SNS::Topic , and the // operator is set to Equals or NotEquals , the ARN must be in the following // format: - // - arn::sns::: When resources.type equals AWS::S3::AccessPoint , and the + // - arn::sns::: When resources.type equals AWS::SQS::Queue , and the operator is + // set to Equals or NotEquals , the ARN must be in the following format: + // - arn::sqs::: When resources.type equals AWS::S3::AccessPoint , and the // operator is set to Equals or NotEquals , the ARN must be in one of the // following formats. To log events on all objects in an S3 access point, we // recommend that you use only the access point ARN, don’t include the object path, @@ -190,6 +263,12 @@ type AdvancedFieldSelector struct { // AWS::SSMMessages::ControlChannel , and the operator is set to Equals or // NotEquals , the ARN must be in the following format: // - arn::ssmmessages:::control-channel/ When resources.type equals + // AWS::ThinClient::Device , and the operator is set to Equals or NotEquals , the + // ARN must be in the following format: + // - arn::thinclient:::device/ When resources.type equals + // AWS::ThinClient::Environment , and the operator is set to Equals or NotEquals + // , the ARN must be in the following format: + // - arn::thinclient:::environment/ When resources.type equals // AWS::Timestream::Database , and the operator is set to Equals or NotEquals , // the ARN must be in the following format: // - arn::timestream:::database/ When resources.type equals @@ -288,39 +367,10 @@ type DataResource struct { // - AWS::DynamoDB::Table // - AWS::Lambda::Function // - AWS::S3::Object - // The following resource types are also available through advanced event - // selectors. Basic event selector resource types are valid in advanced event - // selectors, but advanced event selector resource types are not valid in basic - // event selectors. 
For more information, see AdvancedFieldSelector (https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) + // Additional resource types are available through advanced event selectors. For + // more information about these additional resource types, see + // AdvancedFieldSelector (https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) // . - // - AWS::CloudTrail::Channel - // - AWS::CodeWhisperer::Customization - // - AWS::CodeWhisperer::Profile - // - AWS::Cognito::IdentityPool - // - AWS::DynamoDB::Stream - // - AWS::EC2::Snapshot - // - AWS::EMRWAL::Workspace - // - AWS::FinSpace::Environment - // - AWS::Glue::Table - // - AWS::GuardDuty::Detector - // - AWS::KendraRanking::ExecutionPlan - // - AWS::KinesisVideo::Stream - // - AWS::ManagedBlockchain::Network - // - AWS::ManagedBlockchain::Node - // - AWS::MedicalImaging::Datastore - // - AWS::PCAConnectorAD::Connector - // - AWS::SageMaker::Endpoint - // - AWS::SageMaker::ExperimentTrialComponent - // - AWS::SageMaker::FeatureGroup - // - AWS::SNS::PlatformEndpoint - // - AWS::SNS::Topic - // - AWS::S3::AccessPoint - // - AWS::S3ObjectLambda::AccessPoint - // - AWS::S3Outposts::Object - // - AWS::SSMMessages::ControlChannel - // - AWS::Timestream::Database - // - AWS::Timestream::Table - // - AWS::VerifiedPermissions::PolicyStore Type *string // An array of Amazon Resource Name (ARN) strings or partial ARN strings for the @@ -365,7 +415,7 @@ type Destination struct { Location *string // The type of destination for events arriving from a channel. For channels used - // for a CloudTrail Lake integration, the value is EventDataStore . For + // for a CloudTrail Lake integration, the value is EVENT_DATA_STORE . For // service-linked channels, the value is AWS_SERVICE . // // This member is required. 
diff --git a/service/cloudtrail/validators.go b/service/cloudtrail/validators.go index bddada23f99..b680932e78c 100644 --- a/service/cloudtrail/validators.go +++ b/service/cloudtrail/validators.go @@ -430,6 +430,26 @@ func (m *validateOpListImportFailures) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpListInsightsMetricData struct { +} + +func (*validateOpListInsightsMetricData) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListInsightsMetricData) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListInsightsMetricDataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListInsightsMetricDataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListQueries struct { } @@ -874,6 +894,10 @@ func addOpListImportFailuresValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpListImportFailures{}, middleware.After) } +func addOpListInsightsMetricDataValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListInsightsMetricData{}, middleware.After) +} + func addOpListQueriesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListQueries{}, middleware.After) } @@ -1514,6 +1538,27 @@ func validateOpListImportFailuresInput(v *ListImportFailuresInput) error { } } +func validateOpListInsightsMetricDataInput(v *ListInsightsMetricDataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListInsightsMetricDataInput"} + if v.EventSource == nil { + invalidParams.Add(smithy.NewErrParamRequired("EventSource")) + } + if v.EventName == nil { 
+ invalidParams.Add(smithy.NewErrParamRequired("EventName")) + } + if len(v.InsightType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("InsightType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListQueriesInput(v *ListQueriesInput) error { if v == nil { return nil diff --git a/service/connect/api_op_GetCurrentMetricData.go b/service/connect/api_op_GetCurrentMetricData.go index 8a423a06d47..0992995b444 100644 --- a/service/connect/api_op_GetCurrentMetricData.go +++ b/service/connect/api_op_GetCurrentMetricData.go @@ -73,9 +73,11 @@ type GetCurrentMetricDataInput struct { // Metric data is retrieved only for the resources associated with the queues or // routing profiles, and by any channels included in the filter. (You cannot filter // by both queue AND routing profile.) You can include both resource IDs and - // resource ARNs in the same request. When using RoutingStepExpression , you need - // to pass exactly one QueueId . Currently tagging is only supported on the - // resources that are passed in the filter. + // resource ARNs in the same request. When using the RoutingStepExpression filter, + // you need to pass exactly one QueueId . The filter is also case sensitive so when + // using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is + // required. Currently tagging is only supported on the resources that are passed + // in the filter. // // This member is required. Filters *types.Filters diff --git a/service/connect/api_op_GetMetricDataV2.go b/service/connect/api_op_GetMetricDataV2.go index 0e17490ce73..fb58c82672e 100644 --- a/service/connect/api_op_GetMetricDataV2.go +++ b/service/connect/api_op_GetMetricDataV2.go @@ -75,7 +75,9 @@ type GetMetricDataV2Input struct { // conversational analytics. 
connect:Chat , connect:SMS , connect:Telephony , and // connect:WebRTC are valid filterValue examples (not exhaustive) for the // contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION - // is a valid filter key with a filter value up to 3000 length. + // is a valid filter key with a filter value up to 3000 length. This filter is case + // and order sensitive. JSON string fields must be sorted in ascending order and + // JSON array order should be kept as is. // // This member is required. Filters []types.FilterV2 @@ -179,57 +181,62 @@ type GetMetricDataV2Input struct { // INITIATION_METHOD , DISCONNECT_REASON Valid groupings and filters: Queue, // Channel, Routing Profile, Agent, Agent Hierarchy, Feature, // contact/segmentAttributes/connect:Subtype, RoutingStepExpression Feature is a - // valid filter but not a valid grouping. CONTACTS_HOLD_ABANDONS Unit: Count Valid - // groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - // contact/segmentAttributes/connect:Subtype CONTACTS_ON_HOLD_AGENT_DISCONNECT + // valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT + // Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and + // filters: Queue, Channel, Agent, Agent Hierarchy, + // contact/segmentAttributes/connect:Subtype CONTACTS_HOLD_ABANDONS Unit: Count + // Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + // Hierarchy, contact/segmentAttributes/connect:Subtype + // CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: + // Queue, Channel, Routing Profile, Agent, Agent Hierarchy + // CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: + // Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_PUT_ON_HOLD // Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, - // Agent Hierarchy CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings + // Agent Hierarchy CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings // and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - // CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, - // Routing Profile, Agent, Agent Hierarchy CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: + // CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: + // Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_QUEUED Unit: // Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - // Hierarchy CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and - // filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_QUEUED + // Hierarchy, contact/segmentAttributes/connect:Subtype CONTACTS_QUEUED_BY_ENQUEUE + // Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, + // contact/segmentAttributes/connect:Subtype CONTACTS_RESOLVED_IN_X Unit: Count + // Valid groupings and filters: Queue, Channel, Routing Profile, + // contact/segmentAttributes/connect:Subtype Threshold: For 
ThresholdValue enter + // any whole number from 1 to 604800 (inclusive), in seconds. For Comparison , you + // must enter LT (for "Less than"). CONTACTS_TRANSFERRED_OUT Unit: Count Valid + // groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + // Feature, contact/segmentAttributes/connect:Subtype Feature is a valid filter but + // not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid + // groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + // contact/segmentAttributes/connect:Subtype CONTACTS_TRANSFERRED_OUT_FROM_QUEUE // Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, + // Agent Hierarchy, contact/segmentAttributes/connect:Subtype MAX_QUEUED_TIME Unit: + // Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, // Agent Hierarchy, contact/segmentAttributes/connect:Subtype - // CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, - // Routing Profile, contact/segmentAttributes/connect:Subtype Threshold: For - // ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. - // For Comparison , you must enter LT (for "Less than"). CONTACTS_TRANSFERRED_OUT - // Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, - // Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype Feature is a - // valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: - // Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - // Hierarchy, contact/segmentAttributes/connect:Subtype - // CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: + // PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, + // RoutingStepExpression PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings + // and filters: Queue, RoutingStepExpression PERCENT_NON_TALK_TIME This metric is + // available only for contacts analyzed by Contact Lens conversational analytics. + // Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, + // Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype + // PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact + // Lens conversational analytics. Unit: Percentage Valid groupings and filters: // Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - // contact/segmentAttributes/connect:Subtype MAX_QUEUED_TIME Unit: Seconds Valid - // groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - // contact/segmentAttributes/connect:Subtype PERCENT_CONTACTS_STEP_EXPIRED Unit: - // Percent Valid groupings and filters: Queue, RoutingStepExpression - // PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, - // RoutingStepExpression PERCENT_NON_TALK_TIME This metric is available only for - // contacts analyzed by Contact Lens conversational analytics. Unit: Percentage - // Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - // Hierarchy, contact/segmentAttributes/connect:Subtype PERCENT_TALK_TIME This - // metric is available only for contacts analyzed by Contact Lens conversational - // analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing - // Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype - // PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by - // Contact Lens conversational analytics. Unit: Percentage Valid groupings and + // contact/segmentAttributes/connect:Subtype PERCENT_TALK_TIME_AGENT This metric is + // available only for contacts analyzed by Contact Lens conversational analytics. + // Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, + // Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype + // PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed + // by Contact Lens conversational analytics. Unit: Percentage Valid groupings and // filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - // contact/segmentAttributes/connect:Subtype PERCENT_TALK_TIME_CUSTOMER This metric - // is available only for contacts analyzed by Contact Lens conversational - // analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing - // Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype - // SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: - // Percent Valid groupings and filters: Queue, Channel, Routing Profile Threshold: - // For ThresholdValue , enter any whole number from 1 to 604800 (inclusive), in - // seconds. For Comparison , you must enter LT (for "Less than"). - // STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, - // RoutingStepExpression SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings - // and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - // SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: + // contact/segmentAttributes/connect:Subtype SERVICE_LEVEL You can include up to 20 + // SERVICE_LEVEL metrics in a request. 
Unit: Percent Valid groupings and filters: + // Queue, Channel, Routing Profile Threshold: For ThresholdValue , enter any whole + // number from 1 to 604800 (inclusive), in seconds. For Comparison , you must enter + // LT (for "Less than"). STEP_CONTACTS_QUEUED Unit: Count Valid groupings and + // filters: Queue, RoutingStepExpression SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds + // Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + // Hierarchy SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: // INITIATION_METHOD . This metric only supports the following filter keys as // INITIATION_METHOD : INBOUND | OUTBOUND | CALLBACK | API Valid groupings and // filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy The Negate key diff --git a/service/connect/api_op_GetTrafficDistribution.go b/service/connect/api_op_GetTrafficDistribution.go index bbf54ea3c79..f0fe9cda60e 100644 --- a/service/connect/api_op_GetTrafficDistribution.go +++ b/service/connect/api_op_GetTrafficDistribution.go @@ -31,7 +31,9 @@ func (c *Client) GetTrafficDistribution(ctx context.Context, params *GetTrafficD type GetTrafficDistributionInput struct { - // The identifier of the traffic distribution group. + // The identifier of the traffic distribution group. This can be the ID or the ARN + // if the API is being called in the Region where the traffic distribution group + // was created. The ARN must be provided if the call is from the replicated Region. // // This member is required. Id *string diff --git a/service/drs/api_op_DescribeJobLogItems.go b/service/drs/api_op_DescribeJobLogItems.go index 42268a1f993..b3ee86cd648 100644 --- a/service/drs/api_op_DescribeJobLogItems.go +++ b/service/drs/api_op_DescribeJobLogItems.go @@ -36,7 +36,7 @@ type DescribeJobLogItemsInput struct { JobID *string // Maximum number of Job log items to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Job log items to retrieve. 
NextToken *string @@ -172,8 +172,8 @@ func NewDescribeJobLogItemsPaginator(client DescribeJobLogItemsAPIClient, params } options := DescribeJobLogItemsPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -203,7 +203,11 @@ func (p *DescribeJobLogItemsPaginator) NextPage(ctx context.Context, optFns ...f params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeJobLogItems(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_DescribeJobs.go b/service/drs/api_op_DescribeJobs.go index d58e7252f9d..8c33df158e8 100644 --- a/service/drs/api_op_DescribeJobs.go +++ b/service/drs/api_op_DescribeJobs.go @@ -39,7 +39,7 @@ type DescribeJobsInput struct { Filters *types.DescribeJobsRequestFilters // Maximum number of Jobs to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Job to retrieve. NextToken *string @@ -170,8 +170,8 @@ func NewDescribeJobsPaginator(client DescribeJobsAPIClient, params *DescribeJobs } options := DescribeJobsPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -201,7 +201,11 @@ func (p *DescribeJobsPaginator) NextPage(ctx context.Context, optFns ...func(*Op params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeJobs(ctx, ¶ms, optFns...) 
if err != nil { diff --git a/service/drs/api_op_DescribeLaunchConfigurationTemplates.go b/service/drs/api_op_DescribeLaunchConfigurationTemplates.go index 08dcfe84831..8bd9ed4d8df 100644 --- a/service/drs/api_op_DescribeLaunchConfigurationTemplates.go +++ b/service/drs/api_op_DescribeLaunchConfigurationTemplates.go @@ -36,7 +36,7 @@ type DescribeLaunchConfigurationTemplatesInput struct { LaunchConfigurationTemplateIDs []string // Maximum results to be returned in DescribeLaunchConfigurationTemplates. - MaxResults int32 + MaxResults *int32 // The token of the next Launch Configuration Template to retrieve. NextToken *string @@ -171,8 +171,8 @@ func NewDescribeLaunchConfigurationTemplatesPaginator(client DescribeLaunchConfi } options := DescribeLaunchConfigurationTemplatesPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -202,7 +202,11 @@ func (p *DescribeLaunchConfigurationTemplatesPaginator) NextPage(ctx context.Con params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeLaunchConfigurationTemplates(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_DescribeRecoveryInstances.go b/service/drs/api_op_DescribeRecoveryInstances.go index 5acee3172e0..916b7ae8650 100644 --- a/service/drs/api_op_DescribeRecoveryInstances.go +++ b/service/drs/api_op_DescribeRecoveryInstances.go @@ -34,7 +34,7 @@ type DescribeRecoveryInstancesInput struct { Filters *types.DescribeRecoveryInstancesRequestFilters // Maximum number of Recovery Instances to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Recovery Instance to retrieve. 
NextToken *string @@ -168,8 +168,8 @@ func NewDescribeRecoveryInstancesPaginator(client DescribeRecoveryInstancesAPICl } options := DescribeRecoveryInstancesPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -199,7 +199,11 @@ func (p *DescribeRecoveryInstancesPaginator) NextPage(ctx context.Context, optFn params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeRecoveryInstances(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_DescribeRecoverySnapshots.go b/service/drs/api_op_DescribeRecoverySnapshots.go index 9dc6966fed4..efc50716dc3 100644 --- a/service/drs/api_op_DescribeRecoverySnapshots.go +++ b/service/drs/api_op_DescribeRecoverySnapshots.go @@ -39,7 +39,7 @@ type DescribeRecoverySnapshotsInput struct { Filters *types.DescribeRecoverySnapshotsRequestFilters // Maximum number of Recovery Snapshots to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Recovery Snapshot to retrieve. NextToken *string @@ -179,8 +179,8 @@ func NewDescribeRecoverySnapshotsPaginator(client DescribeRecoverySnapshotsAPICl } options := DescribeRecoverySnapshotsPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -210,7 +210,11 @@ func (p *DescribeRecoverySnapshotsPaginator) NextPage(ctx context.Context, optFn params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeRecoverySnapshots(ctx, ¶ms, optFns...) 
if err != nil { diff --git a/service/drs/api_op_DescribeReplicationConfigurationTemplates.go b/service/drs/api_op_DescribeReplicationConfigurationTemplates.go index dc952e8134a..6ebd69f2e5c 100644 --- a/service/drs/api_op_DescribeReplicationConfigurationTemplates.go +++ b/service/drs/api_op_DescribeReplicationConfigurationTemplates.go @@ -31,7 +31,7 @@ func (c *Client) DescribeReplicationConfigurationTemplates(ctx context.Context, type DescribeReplicationConfigurationTemplatesInput struct { // Maximum number of Replication Configuration Templates to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Replication Configuration Template to retrieve. NextToken *string @@ -170,8 +170,8 @@ func NewDescribeReplicationConfigurationTemplatesPaginator(client DescribeReplic } options := DescribeReplicationConfigurationTemplatesPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -201,7 +201,11 @@ func (p *DescribeReplicationConfigurationTemplatesPaginator) NextPage(ctx contex params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeReplicationConfigurationTemplates(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_DescribeSourceNetworks.go b/service/drs/api_op_DescribeSourceNetworks.go index 0f0f6b04800..6dadc3db47b 100644 --- a/service/drs/api_op_DescribeSourceNetworks.go +++ b/service/drs/api_op_DescribeSourceNetworks.go @@ -34,7 +34,7 @@ type DescribeSourceNetworksInput struct { Filters *types.DescribeSourceNetworksRequestFilters // Maximum number of Source Networks to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Source Networks to retrieve. 
NextToken *string @@ -167,8 +167,8 @@ func NewDescribeSourceNetworksPaginator(client DescribeSourceNetworksAPIClient, } options := DescribeSourceNetworksPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -198,7 +198,11 @@ func (p *DescribeSourceNetworksPaginator) NextPage(ctx context.Context, optFns . params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeSourceNetworks(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_DescribeSourceServers.go b/service/drs/api_op_DescribeSourceServers.go index b2564d2e758..a8b13016d5c 100644 --- a/service/drs/api_op_DescribeSourceServers.go +++ b/service/drs/api_op_DescribeSourceServers.go @@ -34,7 +34,7 @@ type DescribeSourceServersInput struct { Filters *types.DescribeSourceServersRequestFilters // Maximum number of Source Servers to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next Source Server to retrieve. NextToken *string @@ -167,8 +167,8 @@ func NewDescribeSourceServersPaginator(client DescribeSourceServersAPIClient, pa } options := DescribeSourceServersPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -198,7 +198,11 @@ func (p *DescribeSourceServersPaginator) NextPage(ctx context.Context, optFns .. params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.DescribeSourceServers(ctx, ¶ms, optFns...) 
if err != nil { diff --git a/service/drs/api_op_ListExtensibleSourceServers.go b/service/drs/api_op_ListExtensibleSourceServers.go index 15dd195b0f5..730b4e1bd5a 100644 --- a/service/drs/api_op_ListExtensibleSourceServers.go +++ b/service/drs/api_op_ListExtensibleSourceServers.go @@ -39,7 +39,7 @@ type ListExtensibleSourceServersInput struct { StagingAccountID *string // The maximum number of extensible source servers to retrieve. - MaxResults int32 + MaxResults *int32 // The token of the next extensible source server to retrieve. NextToken *string @@ -177,8 +177,8 @@ func NewListExtensibleSourceServersPaginator(client ListExtensibleSourceServersA } options := ListExtensibleSourceServersPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -208,7 +208,11 @@ func (p *ListExtensibleSourceServersPaginator) NextPage(ctx context.Context, opt params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.ListExtensibleSourceServers(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_ListLaunchActions.go b/service/drs/api_op_ListLaunchActions.go index fd8c5a28f7d..73aa45f3d8f 100644 --- a/service/drs/api_op_ListLaunchActions.go +++ b/service/drs/api_op_ListLaunchActions.go @@ -39,7 +39,7 @@ type ListLaunchActionsInput struct { Filters *types.LaunchActionsRequestFilters // Maximum amount of items to return when listing resource launch actions. - MaxResults int32 + MaxResults *int32 // Next token to use when listing resource launch actions. 
NextToken *string @@ -174,8 +174,8 @@ func NewListLaunchActionsPaginator(client ListLaunchActionsAPIClient, params *Li } options := ListLaunchActionsPaginatorOptions{} - if params.MaxResults != 0 { - options.Limit = params.MaxResults + if params.MaxResults != nil { + options.Limit = *params.MaxResults } for _, fn := range optFns { @@ -205,7 +205,11 @@ func (p *ListLaunchActionsPaginator) NextPage(ctx context.Context, optFns ...fun params := *p.params params.NextToken = p.nextToken - params.MaxResults = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit result, err := p.client.ListLaunchActions(ctx, ¶ms, optFns...) if err != nil { diff --git a/service/drs/api_op_PutLaunchAction.go b/service/drs/api_op_PutLaunchAction.go index 127f07190b7..6cfc9b6f4b9 100644 --- a/service/drs/api_op_PutLaunchAction.go +++ b/service/drs/api_op_PutLaunchAction.go @@ -73,7 +73,7 @@ type PutLaunchActionInput struct { // Launch action order. // // This member is required. - Order int32 + Order *int32 // Launch configuration template Id or Source Server Id // @@ -113,7 +113,7 @@ type PutLaunchActionOutput struct { Optional *bool // Launch action order. - Order int32 + Order *int32 // Launch action parameters. 
Parameters map[string]types.LaunchActionParameter diff --git a/service/drs/deserializers.go b/service/drs/deserializers.go index 41e8b9a6ea7..97c980b66db 100644 --- a/service/drs/deserializers.go +++ b/service/drs/deserializers.go @@ -5210,7 +5210,7 @@ func awsRestjson1_deserializeOpDocumentPutLaunchActionOutput(v **PutLaunchAction if err != nil { return err } - sv.Order = int32(i64) + sv.Order = ptr.Int32(int32(i64)) } case "parameters": @@ -10216,7 +10216,7 @@ func awsRestjson1_deserializeDocumentLaunchAction(v **types.LaunchAction, value if err != nil { return err } - sv.Order = int32(i64) + sv.Order = ptr.Int32(int32(i64)) } case "parameters": @@ -11334,7 +11334,7 @@ func awsRestjson1_deserializeDocumentPITPolicyRule(v **types.PITPolicyRule, valu if err != nil { return err } - sv.Interval = int32(i64) + sv.Interval = ptr.Int32(int32(i64)) } case "retentionDuration": @@ -11347,7 +11347,7 @@ func awsRestjson1_deserializeDocumentPITPolicyRule(v **types.PITPolicyRule, valu if err != nil { return err } - sv.RetentionDuration = int32(i64) + sv.RetentionDuration = ptr.Int32(int32(i64)) } case "ruleID": diff --git a/service/drs/serializers.go b/service/drs/serializers.go index 2510e9ab380..91102857ad8 100644 --- a/service/drs/serializers.go +++ b/service/drs/serializers.go @@ -1142,9 +1142,9 @@ func awsRestjson1_serializeOpDocumentDescribeJobLogItemsInput(v *DescribeJobLogI ok.String(*v.JobID) } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1229,9 +1229,9 @@ func awsRestjson1_serializeOpDocumentDescribeJobsInput(v *DescribeJobsInput, val } } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1316,9 +1316,9 @@ func awsRestjson1_serializeOpDocumentDescribeLaunchConfigurationTemplatesInput(v } } - if v.MaxResults != 0 { + if v.MaxResults 
!= nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1403,9 +1403,9 @@ func awsRestjson1_serializeOpDocumentDescribeRecoveryInstancesInput(v *DescribeR } } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1490,9 +1490,9 @@ func awsRestjson1_serializeOpDocumentDescribeRecoverySnapshotsInput(v *DescribeR } } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1580,9 +1580,9 @@ func awsRestjson1_serializeOpDocumentDescribeReplicationConfigurationTemplatesIn object := value.Object() defer object.Close() - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1674,9 +1674,9 @@ func awsRestjson1_serializeOpDocumentDescribeSourceNetworksInput(v *DescribeSour } } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -1761,9 +1761,9 @@ func awsRestjson1_serializeOpDocumentDescribeSourceServersInput(v *DescribeSourc } } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -2343,9 +2343,9 @@ func awsRestjson1_serializeOpDocumentListExtensibleSourceServersInput(v *ListExt object := value.Object() defer object.Close() - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -2435,9 +2435,9 @@ func awsRestjson1_serializeOpDocumentListLaunchActionsInput(v *ListLaunchActions } } - if v.MaxResults != 0 { + if v.MaxResults != nil { ok := 
object.Key("maxResults") - ok.Integer(v.MaxResults) + ok.Integer(*v.MaxResults) } if v.NextToken != nil { @@ -2689,9 +2689,9 @@ func awsRestjson1_serializeOpDocumentPutLaunchActionInput(v *PutLaunchActionInpu ok.Boolean(*v.Optional) } - { + if v.Order != nil { ok := object.Key("order") - ok.Integer(v.Order) + ok.Integer(*v.Order) } if v.Parameters != nil { @@ -4648,14 +4648,14 @@ func awsRestjson1_serializeDocumentPITPolicyRule(v *types.PITPolicyRule, value s ok.Boolean(*v.Enabled) } - { + if v.Interval != nil { ok := object.Key("interval") - ok.Integer(v.Interval) + ok.Integer(*v.Interval) } - { + if v.RetentionDuration != nil { ok := object.Key("retentionDuration") - ok.Integer(v.RetentionDuration) + ok.Integer(*v.RetentionDuration) } if v.RuleID != 0 { diff --git a/service/drs/types/types.go b/service/drs/types/types.go index 1c3ad134895..b5879e3c114 100644 --- a/service/drs/types/types.go +++ b/service/drs/types/types.go @@ -364,7 +364,7 @@ type LaunchAction struct { Optional *bool // Launch action order. - Order int32 + Order *int32 // Launch action parameters. Parameters map[string]LaunchActionParameter @@ -616,12 +616,12 @@ type PITPolicyRule struct { // How often, in the chosen units, a snapshot should be taken. // // This member is required. - Interval int32 + Interval *int32 // The duration to retain a snapshot for, in the chosen units. // // This member is required. - RetentionDuration int32 + RetentionDuration *int32 // The units used to measure the interval and retentionDuration. 
// diff --git a/service/drs/validators.go b/service/drs/validators.go index 83f81bf44d5..5030813e7c2 100644 --- a/service/drs/validators.go +++ b/service/drs/validators.go @@ -1019,6 +1019,12 @@ func validatePITPolicyRule(v *types.PITPolicyRule) error { if len(v.Units) == 0 { invalidParams.Add(smithy.NewErrParamRequired("Units")) } + if v.Interval == nil { + invalidParams.Add(smithy.NewErrParamRequired("Interval")) + } + if v.RetentionDuration == nil { + invalidParams.Add(smithy.NewErrParamRequired("RetentionDuration")) + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -1477,6 +1483,9 @@ func validateOpPutLaunchActionInput(v *PutLaunchActionInput) error { if v.ActionCode == nil { invalidParams.Add(smithy.NewErrParamRequired("ActionCode")) } + if v.Order == nil { + invalidParams.Add(smithy.NewErrParamRequired("Order")) + } if v.ActionId == nil { invalidParams.Add(smithy.NewErrParamRequired("ActionId")) } diff --git a/service/firehose/api_op_CreateDeliveryStream.go b/service/firehose/api_op_CreateDeliveryStream.go index 564fa35f460..a3f7f1ab4cf 100644 --- a/service/firehose/api_op_CreateDeliveryStream.go +++ b/service/firehose/api_op_CreateDeliveryStream.go @@ -140,6 +140,9 @@ type CreateDeliveryStreamInput struct { // Deprecated: This member has been deprecated. S3DestinationConfiguration *types.S3DestinationConfiguration + // Configure Snowflake destination + SnowflakeDestinationConfiguration *types.SnowflakeDestinationConfiguration + // The destination in Splunk. You can specify only one destination. SplunkDestinationConfiguration *types.SplunkDestinationConfiguration diff --git a/service/firehose/api_op_UpdateDestination.go b/service/firehose/api_op_UpdateDestination.go index 1d51027533b..aa38d4d7dd9 100644 --- a/service/firehose/api_op_UpdateDestination.go +++ b/service/firehose/api_op_UpdateDestination.go @@ -98,6 +98,9 @@ type UpdateDestinationInput struct { // Deprecated: This member has been deprecated. 
 S3DestinationUpdate *types.S3DestinationUpdate + // Update to the Snowflake destination configuration settings + SnowflakeDestinationUpdate *types.SnowflakeDestinationUpdate + // Describes an update for a destination in Splunk. SplunkDestinationUpdate *types.SplunkDestinationUpdate diff --git a/service/firehose/deserializers.go b/service/firehose/deserializers.go index 4f3613c8c5d..4e312db8659 100644 --- a/service/firehose/deserializers.go +++ b/service/firehose/deserializers.go @@ -2843,6 +2843,11 @@ func awsAwsjson11_deserializeDocumentDestinationDescription(v **types.Destinatio return err } + case "SnowflakeDestinationDescription": + if err := awsAwsjson11_deserializeDocumentSnowflakeDestinationDescription(&sv.SnowflakeDestinationDescription, value); err != nil { + return err + } + case "SplunkDestinationDescription": if err := awsAwsjson11_deserializeDocumentSplunkDestinationDescription(&sv.SplunkDestinationDescription, value); err != nil { return err @@ -5561,6 +5566,290 @@ func awsAwsjson11_deserializeDocumentServiceUnavailableException(v **types.Servi return nil } +func awsAwsjson11_deserializeDocumentSnowflakeDestinationDescription(v **types.SnowflakeDestinationDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeDestinationDescription + if *v == nil { + sv = &types.SnowflakeDestinationDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AccountUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeAccountUrl to be of type string, got %T instead", value) + } + sv.AccountUrl = ptr.String(jtv) + } + + case "CloudWatchLoggingOptions": + if err := awsAwsjson11_deserializeDocumentCloudWatchLoggingOptions(&sv.CloudWatchLoggingOptions, 
value); err != nil { + return err + } + + case "ContentColumnName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeContentColumnName to be of type string, got %T instead", value) + } + sv.ContentColumnName = ptr.String(jtv) + } + + case "Database": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeDatabase to be of type string, got %T instead", value) + } + sv.Database = ptr.String(jtv) + } + + case "DataLoadingOption": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeDataLoadingOption to be of type string, got %T instead", value) + } + sv.DataLoadingOption = types.SnowflakeDataLoadingOption(jtv) + } + + case "MetaDataColumnName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeMetaDataColumnName to be of type string, got %T instead", value) + } + sv.MetaDataColumnName = ptr.String(jtv) + } + + case "ProcessingConfiguration": + if err := awsAwsjson11_deserializeDocumentProcessingConfiguration(&sv.ProcessingConfiguration, value); err != nil { + return err + } + + case "RetryOptions": + if err := awsAwsjson11_deserializeDocumentSnowflakeRetryOptions(&sv.RetryOptions, value); err != nil { + return err + } + + case "RoleARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleARN to be of type string, got %T instead", value) + } + sv.RoleARN = ptr.String(jtv) + } + + case "S3BackupMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeS3BackupMode to be of type string, got %T instead", value) + } + sv.S3BackupMode = types.SnowflakeS3BackupMode(jtv) + } + + case "S3DestinationDescription": + if err := awsAwsjson11_deserializeDocumentS3DestinationDescription(&sv.S3DestinationDescription, value); err != nil { + return err + } + + case "Schema": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeSchema to be of type string, got %T instead", value) + } + sv.Schema = ptr.String(jtv) + } + + case "SnowflakeRoleConfiguration": + if err := awsAwsjson11_deserializeDocumentSnowflakeRoleConfiguration(&sv.SnowflakeRoleConfiguration, value); err != nil { + return err + } + + case "SnowflakeVpcConfiguration": + if err := awsAwsjson11_deserializeDocumentSnowflakeVpcConfiguration(&sv.SnowflakeVpcConfiguration, value); err != nil { + return err + } + + case "Table": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeTable to be of type string, got %T instead", value) + } + sv.Table = ptr.String(jtv) + } + + case "User": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeUser to be of type string, got %T instead", value) + } + sv.User = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnowflakeRetryOptions(v **types.SnowflakeRetryOptions, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeRetryOptions + if *v == nil { + sv = &types.SnowflakeRetryOptions{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DurationInSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected SnowflakeRetryDurationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DurationInSeconds = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnowflakeRoleConfiguration(v **types.SnowflakeRoleConfiguration, 
value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeRoleConfiguration + if *v == nil { + sv = &types.SnowflakeRoleConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value) + } + sv.Enabled = ptr.Bool(jtv) + } + + case "SnowflakeRole": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakeRole to be of type string, got %T instead", value) + } + sv.SnowflakeRole = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnowflakeVpcConfiguration(v **types.SnowflakeVpcConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeVpcConfiguration + if *v == nil { + sv = &types.SnowflakeVpcConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PrivateLinkVpceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SnowflakePrivateLinkVpceId to be of type string, got %T instead", value) + } + sv.PrivateLinkVpceId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentSourceDescription(v **types.SourceDescription, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/firehose/serializers.go 
b/service/firehose/serializers.go index 89c2181d55e..7f2f8037a39 100644 --- a/service/firehose/serializers.go +++ b/service/firehose/serializers.go @@ -2571,6 +2571,258 @@ func awsAwsjson11_serializeDocumentSerializer(v *types.Serializer, value smithyj return nil } +func awsAwsjson11_serializeDocumentSnowflakeDestinationConfiguration(v *types.SnowflakeDestinationConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccountUrl != nil { + ok := object.Key("AccountUrl") + ok.String(*v.AccountUrl) + } + + if v.CloudWatchLoggingOptions != nil { + ok := object.Key("CloudWatchLoggingOptions") + if err := awsAwsjson11_serializeDocumentCloudWatchLoggingOptions(v.CloudWatchLoggingOptions, ok); err != nil { + return err + } + } + + if v.ContentColumnName != nil { + ok := object.Key("ContentColumnName") + ok.String(*v.ContentColumnName) + } + + if v.Database != nil { + ok := object.Key("Database") + ok.String(*v.Database) + } + + if len(v.DataLoadingOption) > 0 { + ok := object.Key("DataLoadingOption") + ok.String(string(v.DataLoadingOption)) + } + + if v.KeyPassphrase != nil { + ok := object.Key("KeyPassphrase") + ok.String(*v.KeyPassphrase) + } + + if v.MetaDataColumnName != nil { + ok := object.Key("MetaDataColumnName") + ok.String(*v.MetaDataColumnName) + } + + if v.PrivateKey != nil { + ok := object.Key("PrivateKey") + ok.String(*v.PrivateKey) + } + + if v.ProcessingConfiguration != nil { + ok := object.Key("ProcessingConfiguration") + if err := awsAwsjson11_serializeDocumentProcessingConfiguration(v.ProcessingConfiguration, ok); err != nil { + return err + } + } + + if v.RetryOptions != nil { + ok := object.Key("RetryOptions") + if err := awsAwsjson11_serializeDocumentSnowflakeRetryOptions(v.RetryOptions, ok); err != nil { + return err + } + } + + if v.RoleARN != nil { + ok := object.Key("RoleARN") + ok.String(*v.RoleARN) + } + + if len(v.S3BackupMode) > 0 { + ok := object.Key("S3BackupMode") + 
ok.String(string(v.S3BackupMode)) + } + + if v.S3Configuration != nil { + ok := object.Key("S3Configuration") + if err := awsAwsjson11_serializeDocumentS3DestinationConfiguration(v.S3Configuration, ok); err != nil { + return err + } + } + + if v.Schema != nil { + ok := object.Key("Schema") + ok.String(*v.Schema) + } + + if v.SnowflakeRoleConfiguration != nil { + ok := object.Key("SnowflakeRoleConfiguration") + if err := awsAwsjson11_serializeDocumentSnowflakeRoleConfiguration(v.SnowflakeRoleConfiguration, ok); err != nil { + return err + } + } + + if v.SnowflakeVpcConfiguration != nil { + ok := object.Key("SnowflakeVpcConfiguration") + if err := awsAwsjson11_serializeDocumentSnowflakeVpcConfiguration(v.SnowflakeVpcConfiguration, ok); err != nil { + return err + } + } + + if v.Table != nil { + ok := object.Key("Table") + ok.String(*v.Table) + } + + if v.User != nil { + ok := object.Key("User") + ok.String(*v.User) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnowflakeDestinationUpdate(v *types.SnowflakeDestinationUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccountUrl != nil { + ok := object.Key("AccountUrl") + ok.String(*v.AccountUrl) + } + + if v.CloudWatchLoggingOptions != nil { + ok := object.Key("CloudWatchLoggingOptions") + if err := awsAwsjson11_serializeDocumentCloudWatchLoggingOptions(v.CloudWatchLoggingOptions, ok); err != nil { + return err + } + } + + if v.ContentColumnName != nil { + ok := object.Key("ContentColumnName") + ok.String(*v.ContentColumnName) + } + + if v.Database != nil { + ok := object.Key("Database") + ok.String(*v.Database) + } + + if len(v.DataLoadingOption) > 0 { + ok := object.Key("DataLoadingOption") + ok.String(string(v.DataLoadingOption)) + } + + if v.KeyPassphrase != nil { + ok := object.Key("KeyPassphrase") + ok.String(*v.KeyPassphrase) + } + + if v.MetaDataColumnName != nil { + ok := object.Key("MetaDataColumnName") + ok.String(*v.MetaDataColumnName) + } + + if 
v.PrivateKey != nil { + ok := object.Key("PrivateKey") + ok.String(*v.PrivateKey) + } + + if v.ProcessingConfiguration != nil { + ok := object.Key("ProcessingConfiguration") + if err := awsAwsjson11_serializeDocumentProcessingConfiguration(v.ProcessingConfiguration, ok); err != nil { + return err + } + } + + if v.RetryOptions != nil { + ok := object.Key("RetryOptions") + if err := awsAwsjson11_serializeDocumentSnowflakeRetryOptions(v.RetryOptions, ok); err != nil { + return err + } + } + + if v.RoleARN != nil { + ok := object.Key("RoleARN") + ok.String(*v.RoleARN) + } + + if len(v.S3BackupMode) > 0 { + ok := object.Key("S3BackupMode") + ok.String(string(v.S3BackupMode)) + } + + if v.S3Update != nil { + ok := object.Key("S3Update") + if err := awsAwsjson11_serializeDocumentS3DestinationUpdate(v.S3Update, ok); err != nil { + return err + } + } + + if v.Schema != nil { + ok := object.Key("Schema") + ok.String(*v.Schema) + } + + if v.SnowflakeRoleConfiguration != nil { + ok := object.Key("SnowflakeRoleConfiguration") + if err := awsAwsjson11_serializeDocumentSnowflakeRoleConfiguration(v.SnowflakeRoleConfiguration, ok); err != nil { + return err + } + } + + if v.Table != nil { + ok := object.Key("Table") + ok.String(*v.Table) + } + + if v.User != nil { + ok := object.Key("User") + ok.String(*v.User) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnowflakeRetryOptions(v *types.SnowflakeRetryOptions, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DurationInSeconds != nil { + ok := object.Key("DurationInSeconds") + ok.Integer(*v.DurationInSeconds) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnowflakeRoleConfiguration(v *types.SnowflakeRoleConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Enabled != nil { + ok := object.Key("Enabled") + ok.Boolean(*v.Enabled) + } + + if v.SnowflakeRole != nil { + ok := object.Key("SnowflakeRole") + 
ok.String(*v.SnowflakeRole) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnowflakeVpcConfiguration(v *types.SnowflakeVpcConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PrivateLinkVpceId != nil { + ok := object.Key("PrivateLinkVpceId") + ok.String(*v.PrivateLinkVpceId) + } + + return nil +} + func awsAwsjson11_serializeDocumentSplunkBufferingHints(v *types.SplunkBufferingHints, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -2896,6 +3148,13 @@ func awsAwsjson11_serializeOpDocumentCreateDeliveryStreamInput(v *CreateDelivery } } + if v.SnowflakeDestinationConfiguration != nil { + ok := object.Key("SnowflakeDestinationConfiguration") + if err := awsAwsjson11_serializeDocumentSnowflakeDestinationConfiguration(v.SnowflakeDestinationConfiguration, ok); err != nil { + return err + } + } + if v.SplunkDestinationConfiguration != nil { ok := object.Key("SplunkDestinationConfiguration") if err := awsAwsjson11_serializeDocumentSplunkDestinationConfiguration(v.SplunkDestinationConfiguration, ok); err != nil { @@ -3171,6 +3430,13 @@ func awsAwsjson11_serializeOpDocumentUpdateDestinationInput(v *UpdateDestination } } + if v.SnowflakeDestinationUpdate != nil { + ok := object.Key("SnowflakeDestinationUpdate") + if err := awsAwsjson11_serializeDocumentSnowflakeDestinationUpdate(v.SnowflakeDestinationUpdate, ok); err != nil { + return err + } + } + if v.SplunkDestinationUpdate != nil { ok := object.Key("SplunkDestinationUpdate") if err := awsAwsjson11_serializeDocumentSplunkDestinationUpdate(v.SplunkDestinationUpdate, ok); err != nil { diff --git a/service/firehose/types/enums.go b/service/firehose/types/enums.go index 2c9f5253d20..19b1008a116 100644 --- a/service/firehose/types/enums.go +++ b/service/firehose/types/enums.go @@ -541,6 +541,44 @@ func (S3BackupMode) Values() []S3BackupMode { } } +type SnowflakeDataLoadingOption string + +// Enum values for 
SnowflakeDataLoadingOption +const ( + SnowflakeDataLoadingOptionJsonMapping SnowflakeDataLoadingOption = "JSON_MAPPING" + SnowflakeDataLoadingOptionVariantContentMapping SnowflakeDataLoadingOption = "VARIANT_CONTENT_MAPPING" + SnowflakeDataLoadingOptionVariantContentAndMetadataMapping SnowflakeDataLoadingOption = "VARIANT_CONTENT_AND_METADATA_MAPPING" +) + +// Values returns all known values for SnowflakeDataLoadingOption. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (SnowflakeDataLoadingOption) Values() []SnowflakeDataLoadingOption { + return []SnowflakeDataLoadingOption{ + "JSON_MAPPING", + "VARIANT_CONTENT_MAPPING", + "VARIANT_CONTENT_AND_METADATA_MAPPING", + } +} + +type SnowflakeS3BackupMode string + +// Enum values for SnowflakeS3BackupMode +const ( + SnowflakeS3BackupModeFailedDataOnly SnowflakeS3BackupMode = "FailedDataOnly" + SnowflakeS3BackupModeAllData SnowflakeS3BackupMode = "AllData" +) + +// Values returns all known values for SnowflakeS3BackupMode. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (SnowflakeS3BackupMode) Values() []SnowflakeS3BackupMode { + return []SnowflakeS3BackupMode{ + "FailedDataOnly", + "AllData", + } +} + type SplunkS3BackupMode string // Enum values for SplunkS3BackupMode diff --git a/service/firehose/types/types.go b/service/firehose/types/types.go index 179dc9c098f..84632d8ae79 100644 --- a/service/firehose/types/types.go +++ b/service/firehose/types/types.go @@ -694,6 +694,9 @@ type DestinationDescription struct { // [Deprecated] The destination in Amazon S3. S3DestinationDescription *S3DestinationDescription + // Optional description for the destination + SnowflakeDestinationDescription *SnowflakeDestinationDescription + // The destination in Splunk. 
SplunkDestinationDescription *SplunkDestinationDescription @@ -2188,6 +2191,294 @@ type Serializer struct { noSmithyDocumentSerde } +// Configure Snowflake destination +type SnowflakeDestinationConfiguration struct { + + // URL for accessing your Snowflake account. This URL must include your account + // identifier (https://docs.snowflake.com/en/user-guide/admin-account-identifier) . + // Note that the protocol (https://) and port number are optional. + // + // This member is required. + AccountUrl *string + + // All data in Snowflake is maintained in databases. + // + // This member is required. + Database *string + + // The private key used to encrypt your Snowflake client. For information, see + // Using Key Pair Authentication & Key Rotation (https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) + // . + // + // This member is required. + PrivateKey *string + + // The Amazon Resource Name (ARN) of the Snowflake role + // + // This member is required. + RoleARN *string + + // Describes the configuration of a destination in Amazon S3. + // + // This member is required. + S3Configuration *S3DestinationConfiguration + + // Each database consists of one or more schemas, which are logical groupings of + // database objects, such as tables and views + // + // This member is required. + Schema *string + + // All data in Snowflake is stored in database tables, logically structured as + // collections of columns and rows. + // + // This member is required. + Table *string + + // User login name for the Snowflake account. + // + // This member is required. + User *string + + // Describes the Amazon CloudWatch logging options for your delivery stream. 
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions + + // The name of the record content column + ContentColumnName *string + + // Choose to load JSON keys mapped to table column names or choose to split the + // JSON payload where content is mapped to a record content column and source + // metadata is mapped to a record metadata column. + DataLoadingOption SnowflakeDataLoadingOption + + // Passphrase to decrypt the private key when the key is encrypted. For + // information, see Using Key Pair Authentication & Key Rotation (https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) + // . + KeyPassphrase *string + + // The name of the record metadata column + MetaDataColumnName *string + + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration + + // The time period where Kinesis Data Firehose will retry sending data to the + // chosen HTTP endpoint. + RetryOptions *SnowflakeRetryOptions + + // Choose an S3 backup mode + S3BackupMode SnowflakeS3BackupMode + + // Optionally configure a Snowflake role. Otherwise the default user role will be + // used. + SnowflakeRoleConfiguration *SnowflakeRoleConfiguration + + // The VPCE ID for Firehose to privately connect with Snowflake. The ID format is + // com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon + // PrivateLink & Snowflake (https://docs.snowflake.com/en/user-guide/admin-security-privatelink) + SnowflakeVpcConfiguration *SnowflakeVpcConfiguration + + noSmithyDocumentSerde +} + +// Optional Snowflake destination description +type SnowflakeDestinationDescription struct { + + // URL for accessing your Snowflake account. This URL must include your account + // identifier (https://docs.snowflake.com/en/user-guide/admin-account-identifier) . + // Note that the protocol (https://) and port number are optional. 
+ AccountUrl *string + + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions + + // The name of the record content column + ContentColumnName *string + + // Choose to load JSON keys mapped to table column names or choose to split the + // JSON payload where content is mapped to a record content column and source + // metadata is mapped to a record metadata column. + DataLoadingOption SnowflakeDataLoadingOption + + // All data in Snowflake is maintained in databases. + Database *string + + // The name of the record metadata column + MetaDataColumnName *string + + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration + + // The time period where Kinesis Data Firehose will retry sending data to the + // chosen HTTP endpoint. + RetryOptions *SnowflakeRetryOptions + + // The Amazon Resource Name (ARN) of the Snowflake role + RoleARN *string + + // Choose an S3 backup mode + S3BackupMode SnowflakeS3BackupMode + + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription + + // Each database consists of one or more schemas, which are logical groupings of + // database objects, such as tables and views + Schema *string + + // Optionally configure a Snowflake role. Otherwise the default user role will be + // used. + SnowflakeRoleConfiguration *SnowflakeRoleConfiguration + + // The VPCE ID for Firehose to privately connect with Snowflake. The ID format is + // com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon + // PrivateLink & Snowflake (https://docs.snowflake.com/en/user-guide/admin-security-privatelink) + SnowflakeVpcConfiguration *SnowflakeVpcConfiguration + + // All data in Snowflake is stored in database tables, logically structured as + // collections of columns and rows. + Table *string + + // User login name for the Snowflake account. 
+ User *string + + noSmithyDocumentSerde +} + +// Update to configuration settings +type SnowflakeDestinationUpdate struct { + + // URL for accessing your Snowflake account. This URL must include your account + // identifier (https://docs.snowflake.com/en/user-guide/admin-account-identifier) . + // Note that the protocol (https://) and port number are optional. + AccountUrl *string + + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions + + // The name of the content metadata column + ContentColumnName *string + + // JSON keys mapped to table column names or choose to split the JSON payload + // where content is mapped to a record content column and source metadata is mapped + // to a record metadata column. + DataLoadingOption SnowflakeDataLoadingOption + + // All data in Snowflake is maintained in databases. + Database *string + + // Passphrase to decrypt the private key when the key is encrypted. For + // information, see Using Key Pair Authentication & Key Rotation (https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) + // . + KeyPassphrase *string + + // The name of the record metadata column + MetaDataColumnName *string + + // The private key used to encrypt your Snowflake client. For information, see + // Using Key Pair Authentication & Key Rotation (https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) + // . + PrivateKey *string + + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration + + // Specify how long Kinesis Data Firehose retries sending data to the New Relic + // HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an + // acknowledgment from the HTTP endpoint. 
If an error occurs or the acknowledgment + // doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose + // starts the retry duration counter. It keeps retrying until the retry duration + // expires. After that, Kinesis Data Firehose considers it a data delivery failure + // and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data + // Firehose sends data to the HTTP endpoint (either the initial attempt or a + // retry), it restarts the acknowledgement timeout counter and waits for an + // acknowledgement from the HTTP endpoint. Even if the retry duration expires, + // Kinesis Data Firehose still waits for the acknowledgment until it receives it or + // the acknowledgement timeout period is reached. If the acknowledgment times out, + // Kinesis Data Firehose determines whether there's time left in the retry counter. + // If there is time left, it retries again and repeats the logic until it receives + // an acknowledgment or determines that the retry time has expired. If you don't + // want Kinesis Data Firehose to retry sending data, set this value to 0. + RetryOptions *SnowflakeRetryOptions + + // The Amazon Resource Name (ARN) of the Snowflake role + RoleARN *string + + // Choose an S3 backup mode + S3BackupMode SnowflakeS3BackupMode + + // Describes an update for a destination in Amazon S3. + S3Update *S3DestinationUpdate + + // Each database consists of one or more schemas, which are logical groupings of + // database objects, such as tables and views + Schema *string + + // Optionally configure a Snowflake role. Otherwise the default user role will be + // used. + SnowflakeRoleConfiguration *SnowflakeRoleConfiguration + + // All data in Snowflake is stored in database tables, logically structured as + // collections of columns and rows. + Table *string + + // User login name for the Snowflake account. 
+ User *string + + noSmithyDocumentSerde +} + +// Specify how long Kinesis Data Firehose retries sending data to the New Relic +// HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an +// acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment +// doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose +// starts the retry duration counter. It keeps retrying until the retry duration +// expires. After that, Kinesis Data Firehose considers it a data delivery failure +// and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data +// Firehose sends data to the HTTP endpoint (either the initial attempt or a +// retry), it restarts the acknowledgement timeout counter and waits for an +// acknowledgement from the HTTP endpoint. Even if the retry duration expires, +// Kinesis Data Firehose still waits for the acknowledgment until it receives it or +// the acknowledgement timeout period is reached. If the acknowledgment times out, +// Kinesis Data Firehose determines whether there's time left in the retry counter. +// If there is time left, it retries again and repeats the logic until it receives +// an acknowledgment or determines that the retry time has expired. If you don't +// want Kinesis Data Firehose to retry sending data, set this value to 0. +type SnowflakeRetryOptions struct { + + // the time period where Kinesis Data Firehose will retry sending data to the + // chosen HTTP endpoint. + DurationInSeconds *int32 + + noSmithyDocumentSerde +} + +// Optionally configure a Snowflake role. Otherwise the default user role will be +// used. +type SnowflakeRoleConfiguration struct { + + // Enable Snowflake role + Enabled *bool + + // The Snowflake role you wish to configure + SnowflakeRole *string + + noSmithyDocumentSerde +} + +// Configure a Snowflake VPC +type SnowflakeVpcConfiguration struct { + + // The VPCE ID for Firehose to privately connect with Snowflake. 
The ID format is + // com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon + // PrivateLink & Snowflake (https://docs.snowflake.com/en/user-guide/admin-security-privatelink) + // + // This member is required. + PrivateLinkVpceId *string + + noSmithyDocumentSerde +} + // Details about a Kinesis data stream used as the source for a Kinesis Data // Firehose delivery stream. type SourceDescription struct { diff --git a/service/firehose/validators.go b/service/firehose/validators.go index 2fbe1ee9954..1b742beb155 100644 --- a/service/firehose/validators.go +++ b/service/firehose/validators.go @@ -1039,6 +1039,93 @@ func validateS3DestinationUpdate(v *types.S3DestinationUpdate) error { } } +func validateSnowflakeDestinationConfiguration(v *types.SnowflakeDestinationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SnowflakeDestinationConfiguration"} + if v.AccountUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountUrl")) + } + if v.PrivateKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrivateKey")) + } + if v.User == nil { + invalidParams.Add(smithy.NewErrParamRequired("User")) + } + if v.Database == nil { + invalidParams.Add(smithy.NewErrParamRequired("Database")) + } + if v.Schema == nil { + invalidParams.Add(smithy.NewErrParamRequired("Schema")) + } + if v.Table == nil { + invalidParams.Add(smithy.NewErrParamRequired("Table")) + } + if v.SnowflakeVpcConfiguration != nil { + if err := validateSnowflakeVpcConfiguration(v.SnowflakeVpcConfiguration); err != nil { + invalidParams.AddNested("SnowflakeVpcConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.ProcessingConfiguration != nil { + if err := validateProcessingConfiguration(v.ProcessingConfiguration); err != nil { + invalidParams.AddNested("ProcessingConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.RoleARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleARN")) + } 
+ if v.S3Configuration == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3Configuration")) + } else if v.S3Configuration != nil { + if err := validateS3DestinationConfiguration(v.S3Configuration); err != nil { + invalidParams.AddNested("S3Configuration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSnowflakeDestinationUpdate(v *types.SnowflakeDestinationUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SnowflakeDestinationUpdate"} + if v.ProcessingConfiguration != nil { + if err := validateProcessingConfiguration(v.ProcessingConfiguration); err != nil { + invalidParams.AddNested("ProcessingConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.S3Update != nil { + if err := validateS3DestinationUpdate(v.S3Update); err != nil { + invalidParams.AddNested("S3Update", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSnowflakeVpcConfiguration(v *types.SnowflakeVpcConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SnowflakeVpcConfiguration"} + if v.PrivateLinkVpceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrivateLinkVpceId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateSplunkDestinationConfiguration(v *types.SplunkDestinationConfiguration) error { if v == nil { return nil @@ -1215,6 +1302,11 @@ func validateOpCreateDeliveryStreamInput(v *CreateDeliveryStreamInput) error { invalidParams.AddNested("MSKSourceConfiguration", err.(smithy.InvalidParamsError)) } } + if v.SnowflakeDestinationConfiguration != nil { + if err := validateSnowflakeDestinationConfiguration(v.SnowflakeDestinationConfiguration); err != nil { + invalidParams.AddNested("SnowflakeDestinationConfiguration", 
err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -1440,6 +1532,11 @@ func validateOpUpdateDestinationInput(v *UpdateDestinationInput) error { invalidParams.AddNested("AmazonOpenSearchServerlessDestinationUpdate", err.(smithy.InvalidParamsError)) } } + if v.SnowflakeDestinationUpdate != nil { + if err := validateSnowflakeDestinationUpdate(v.SnowflakeDestinationUpdate); err != nil { + invalidParams.AddNested("SnowflakeDestinationUpdate", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/service/sagemakerfeaturestoreruntime/api_op_DeleteRecord.go b/service/sagemakerfeaturestoreruntime/api_op_DeleteRecord.go index ac3ae0f31e0..af7d22edd48 100644 --- a/service/sagemakerfeaturestoreruntime/api_op_DeleteRecord.go +++ b/service/sagemakerfeaturestoreruntime/api_op_DeleteRecord.go @@ -17,16 +17,26 @@ import ( // set to null and the record is no longer retrievable by GetRecord or // BatchGetRecord . For HardDelete , the complete Record is removed from the // OnlineStore . In both cases, Feature Store appends the deleted record marker to -// the OfflineStore with feature values set to null , is_deleted value set to True -// , and EventTime set to the delete input EventTime . Note that the EventTime -// specified in DeleteRecord should be set later than the EventTime of the -// existing record in the OnlineStore for that RecordIdentifer . If it is not, the -// deletion does not occur: -// - For SoftDelete , the existing (undeleted) record remains in the OnlineStore -// , though the delete record marker is still written to the OfflineStore . +// the OfflineStore . The deleted record marker is a record with the same +// RecordIdentifer as the original, but with is_deleted value set to True , +// EventTime set to the delete input EventTime , and other feature values set to +// null . 
Note that the EventTime specified in DeleteRecord should be set later +// than the EventTime of the existing record in the OnlineStore for that +// RecordIdentifer . If it is not, the deletion does not occur: +// - For SoftDelete , the existing (not deleted) record remains in the +// OnlineStore , though the delete record marker is still written to the +// OfflineStore . // - HardDelete returns EventTime : 400 ValidationException to indicate that the // delete operation failed. No delete record marker is written to the // OfflineStore . +// +// When a record is deleted from the OnlineStore , the deleted record marker is +// appended to the OfflineStore . If you have the Iceberg table format enabled for +// your OfflineStore , you can remove all history of a record from the OfflineStore +// using Amazon Athena or Apache Spark. For information on how to hard delete a +// record from the OfflineStore with the Iceberg table format enabled, see Delete +// records from the offline store (https://docs.aws.amazon.com/sagemaker/latest/dg/feature-store-delete-records-offline-store.html#feature-store-delete-records-offline-store) +// . func (c *Client) DeleteRecord(ctx context.Context, params *DeleteRecordInput, optFns ...func(*Options)) (*DeleteRecordOutput, error) { if params == nil { params = &DeleteRecordInput{} diff --git a/service/sso/internal/endpoints/endpoints.go b/service/sso/internal/endpoints/endpoints.go index f044afde47c..c8f7c09e46d 100644 --- a/service/sso/internal/endpoints/endpoints.go +++ b/service/sso/internal/endpoints/endpoints.go @@ -283,6 +283,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "il-central-1", }, }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + }, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{