From 13d05473c0b4b32eab9c50287824aa7db05fe984 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Thu, 24 Aug 2023 14:28:08 -0400 Subject: [PATCH] Release v1.44.331 (2023-08-24) (#4964) Release v1.44.331 (2023-08-24) === ### Service Client Updates * `service/ec2`: Updates service API * Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. * `service/glue`: Updates service API and documentation * Added API attributes that help in the monitoring of sessions. * `service/mediaconvert`: Updates service API and documentation * This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. * `service/medialive`: Updates service API and documentation * MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. * `service/mediatailor`: Updates service API and documentation * `service/quicksight`: Updates service API, documentation, and paginators * Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. * `service/rds`: Updates service API, documentation, waiters, paginators, and examples * This release updates the supported versions for Percona XtraBackup in Aurora MySQL. 
* `service/s3`: Updates service examples * Updates to endpoint ruleset tests to address Smithy validation issues. * `service/s3control`: Adds new service * Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. * `service/verifiedpermissions`: Updates service documentation --- CHANGELOG.md | 23 + aws/endpoints/defaults.go | 12 + aws/version.go | 2 +- models/apis/ec2/2016-11-15/api-2.json | 18 +- models/apis/glue/2017-03-31/api-2.json | 12 +- models/apis/glue/2017-03-31/docs-2.json | 11 + .../apis/mediaconvert/2017-08-29/api-2.json | 94 +- .../apis/mediaconvert/2017-08-29/docs-2.json | 43 +- .../2017-08-29/endpoint-rule-set-1.json | 400 +- models/apis/medialive/2017-10-14/api-2.json | 38 + models/apis/medialive/2017-10-14/docs-2.json | 19 + .../2017-10-14/endpoint-rule-set-1.json | 344 +- models/apis/mediatailor/2018-04-23/api-2.json | 3 +- .../apis/mediatailor/2018-04-23/docs-2.json | 2 +- .../2018-04-23/endpoint-rule-set-1.json | 344 +- models/apis/quicksight/2018-04-01/api-2.json | 128 +- models/apis/quicksight/2018-04-01/docs-2.json | 61 +- .../quicksight/2018-04-01/paginators-1.json | 30 + models/apis/rds/2014-10-31/docs-2.json | 4 +- models/apis/rds/2014-10-31/paginators-1.json | 6 + .../apis/s3/2006-03-01/endpoint-tests-1.json | 235 +- models/apis/s3/2006-03-01/examples-1.json | 262 +- .../2018-08-20/endpoint-rule-set-1.json | 3669 ++++++++--------- .../2018-08-20/endpoint-tests-1.json | 196 +- .../2021-12-01/docs-2.json | 20 +- models/endpoints/endpoints.json | 7 + service/ec2/api.go | 64 + service/glue/api.go | 56 + service/mediaconvert/api.go | 323 +- service/medialive/api.go | 100 + service/mediatailor/api.go | 40 +- service/quicksight/api.go | 626 ++- .../quicksight/quicksightiface/interface.go | 15 + service/rds/api.go | 63 +- service/rds/rdsiface/interface.go | 3 + service/s3/examples_test.go | 180 +- service/verifiedpermissions/api.go | 51 +- 37 files changed, 4223 insertions(+), 3281 
deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c90ab4186ac..d8f0f44a2b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +Release v1.44.331 (2023-08-24) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. +* `service/glue`: Updates service API and documentation + * Added API attributes that help in the monitoring of sessions. +* `service/mediaconvert`: Updates service API and documentation + * This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. +* `service/medialive`: Updates service API and documentation + * MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. +* `service/mediatailor`: Updates service API and documentation +* `service/quicksight`: Updates service API, documentation, and paginators + * Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * This release updates the supported versions for Percona XtraBackup in Aurora MySQL. +* `service/s3`: Updates service examples + * Updates to endpoint ruleset tests to address Smithy validation issues. 
+* `service/s3control`: Adds new service + * Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. +* `service/verifiedpermissions`: Updates service documentation + Release v1.44.330 (2023-08-23) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 33b80b07f76..5b78f6d95d0 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -3670,6 +3670,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7903,6 +7912,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 16852ff70b0..a4421da7a8a 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.330" +const SDKVersion = "1.44.331" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 190509edd57..c8cd919484a 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -27354,7 +27354,23 @@ "m7i-flex.xlarge", "m7i-flex.2xlarge", "m7i-flex.4xlarge", - "m7i-flex.8xlarge" + "m7i-flex.8xlarge", + "m7a.medium", + "m7a.large", + "m7a.xlarge", + "m7a.2xlarge", + "m7a.4xlarge", + "m7a.8xlarge", + "m7a.12xlarge", + "m7a.16xlarge", + "m7a.24xlarge", + "m7a.32xlarge", + "m7a.48xlarge", + "m7a.metal-48xl", + "hpc7a.12xlarge", + "hpc7a.24xlarge", + "hpc7a.48xlarge", + "hpc7a.96xlarge" ] }, "InstanceTypeHypervisor":{ diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index 
0b68fc18984..2143da1c165 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -8261,6 +8261,10 @@ }, "exception":true }, + "IdleTimeout":{ + "type":"integer", + "box":true + }, "IllegalBlueprintStateException":{ "type":"structure", "members":{ @@ -10998,7 +11002,13 @@ "Progress":{"shape":"DoubleValue"}, "MaxCapacity":{"shape":"NullableDouble"}, "SecurityConfiguration":{"shape":"NameString"}, - "GlueVersion":{"shape":"GlueVersionString"} + "GlueVersion":{"shape":"GlueVersionString"}, + "NumberOfWorkers":{"shape":"NullableInteger"}, + "WorkerType":{"shape":"WorkerType"}, + "CompletedOn":{"shape":"TimestampValue"}, + "ExecutionTime":{"shape":"NullableDouble"}, + "DPUSeconds":{"shape":"NullableDouble"}, + "IdleTimeout":{"shape":"IdleTimeout"} } }, "SessionCommand":{ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index b394da6432d..8131071a8fd 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -4410,6 +4410,12 @@ "refs": { } }, + "IdleTimeout": { + "base": null, + "refs": { + "Session$IdleTimeout": "

The number of minutes when idle before the session times out.

" + } + }, "IllegalBlueprintStateException": { "base": "

The blueprint is in an invalid state to perform a requested operation.

", "refs": { @@ -5909,6 +5915,8 @@ "JobUpdate$MaxCapacity": "

For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

", "MLTransform$MaxCapacity": "

The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

", "Session$MaxCapacity": "

The number of Glue data processing units (DPUs) that can be allocated when the job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB memory.

", + "Session$ExecutionTime": "

The total time the session ran for.

", + "Session$DPUSeconds": "

The DPUs consumed by the session (formula: ExecutionTime * MaxCapacity).

", "StartJobRunRequest$MaxCapacity": "

For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

", "UpdateMLTransformRequest$MaxCapacity": "

The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

" } @@ -5937,6 +5945,7 @@ "MLTransform$NumberOfWorkers": "

The number of workers of a defined workerType that are allocated when a task of the transform runs.

If WorkerType is set, then NumberOfWorkers is required (and vice versa).

", "MLTransform$MaxRetries": "

The maximum number of times to retry after an MLTaskRun of the machine learning transform fails.

", "S3Target$SampleSize": "

Sets the number of files in each leaf folder to be crawled when crawling sample files in a dataset. If not set, all the files are crawled. A valid value is an integer between 1 and 249.

", + "Session$NumberOfWorkers": "

The number of workers of a defined WorkerType to use for the session.

", "StartDataQualityRuleRecommendationRunRequest$NumberOfWorkers": "

The number of G.1X workers to be used in the run. The default is 5.

", "StartDataQualityRulesetEvaluationRunRequest$NumberOfWorkers": "

The number of G.1X workers to be used in the run. The default is 5.

", "StartJobRunRequest$NumberOfWorkers": "

The number of workers of a defined workerType that are allocated when a job runs.

", @@ -8001,6 +8010,7 @@ "LastActiveDefinition$LastModifiedOn": "

The date and time the blueprint was last modified.

", "SecurityConfiguration$CreatedTimeStamp": "

The time at which this security configuration was created.

", "Session$CreatedOn": "

The time and date when the session was created.

", + "Session$CompletedOn": "

The date and time that this session is completed.

", "Workflow$CreatedOn": "

The date and time when the workflow was created.

", "Workflow$LastModifiedOn": "

The date and time when the workflow was last modified.

", "WorkflowRun$StartedOn": "

The date and time when the workflow run was started.

", @@ -8666,6 +8676,7 @@ "JobRun$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", "JobUpdate$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", "MLTransform$WorkerType": "

The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.

MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.

", + "Session$WorkerType": "

The type of predefined worker that is allocated when a session runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value Z.2X for Ray sessions.

", "StartJobRunRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", "UpdateMLTransformRequest$WorkerType": "

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

" } diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index 97725f2474a..df8e0a4a2a4 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -1357,7 +1357,20 @@ "TCS", "VHL", "VHC", - "VHR" + "VHR", + "TBL", + "TBC", + "TBR", + "RSL", + "RSR", + "LW", + "RW", + "LFE2", + "LT", + "RT", + "HI", + "NAR", + "M" ] }, "AudioChannelTaggingSettings": { @@ -1382,7 +1395,8 @@ "EAC3_ATMOS", "VORBIS", "OPUS", - "PASSTHROUGH" + "PASSTHROUGH", + "FLAC" ] }, "AudioCodecSettings": { @@ -1412,6 +1426,10 @@ "shape": "Eac3Settings", "locationName": "eac3Settings" }, + "FlacSettings": { + "shape": "FlacSettings", + "locationName": "flacSettings" + }, "Mp2Settings": { "shape": "Mp2Settings", "locationName": "mp2Settings" @@ -1720,6 +1738,13 @@ "BIT_10" ] }, + "Av1FilmGrainSynthesis": { + "type": "string", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "Av1FramerateControl": { "type": "string", "enum": [ @@ -1765,6 +1790,10 @@ "shape": "Av1BitDepth", "locationName": "bitDepth" }, + "FilmGrainSynthesis": { + "shape": "Av1FilmGrainSynthesis", + "locationName": "filmGrainSynthesis" + }, "FramerateControl": { "shape": "Av1FramerateControl", "locationName": "framerateControl" @@ -4552,6 +4581,23 @@ "MILLISECONDS" ] }, + "FlacSettings": { + "type": "structure", + "members": { + "BitDepth": { + "shape": "__integerMin16Max24", + "locationName": "bitDepth" + }, + "Channels": { + "shape": "__integerMin1Max8", + "locationName": "channels" + }, + "SampleRate": { + "shape": "__integerMin22050Max48000", + "locationName": "sampleRate" + } + } + }, "FontScript": { "type": "string", "enum": [ @@ -7479,6 +7525,14 @@ "shape": "__integerMin0Max65535", "locationName": "programNumber" }, + "PtsOffset": { + "shape": "__integerMin0Max3600", + "locationName": "ptsOffset" + }, + "PtsOffsetMode": { + "shape": "TsPtsOffset", + "locationName": "ptsOffsetMode" + }, "RateMode": { "shape": 
"M2tsRateMode", "locationName": "rateMode" @@ -7611,6 +7665,14 @@ "shape": "__integerMin0Max65535", "locationName": "programNumber" }, + "PtsOffset": { + "shape": "__integerMin0Max3600", + "locationName": "ptsOffset" + }, + "PtsOffsetMode": { + "shape": "TsPtsOffset", + "locationName": "ptsOffsetMode" + }, "Scte35Pid": { "shape": "__integerMin32Max8182", "locationName": "scte35Pid" @@ -9318,6 +9380,10 @@ "Encryption": { "shape": "S3EncryptionSettings", "locationName": "encryption" + }, + "StorageClass": { + "shape": "S3StorageClass", + "locationName": "storageClass" } } }, @@ -9354,6 +9420,18 @@ "SERVER_SIDE_ENCRYPTION_KMS" ] }, + "S3StorageClass": { + "type": "string", + "enum": [ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE" + ] + }, "SampleRangeConversion": { "type": "string", "enum": [ @@ -9667,6 +9745,13 @@ } } }, + "TsPtsOffset": { + "type": "string", + "enum": [ + "AUTO", + "SECONDS" + ] + }, "TtmlDestinationSettings": { "type": "structure", "members": { @@ -11151,6 +11236,11 @@ "min": 1, "max": 64 }, + "__integerMin1Max8": { + "type": "integer", + "min": 1, + "max": 8 + }, "__integerMin22050Max48000": { "type": "integer", "min": 22050, diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index 03a3ceca4fa..3f605b88e90 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -14,7 +14,7 @@ "DeleteQueue": "Permanently delete a queue you have created.", "DescribeEndpoints": "Send an request with an empty body to the regional API endpoint to get your account API endpoint.", "DisassociateCertificate": "Removes an association between the Amazon Resource Name (ARN) of an AWS Certificate Manager (ACM) certificate and an AWS Elemental MediaConvert resource.", - "GetJob": "Retrieve the JSON for a specific completed transcoding job.", + "GetJob": "Retrieve the JSON for a 
specific transcoding job.", "GetJobTemplate": "Retrieve the JSON for a specific job template.", "GetPolicy": "Retrieve the JSON for your policy.", "GetPreset": "Retrieve the JSON for a specific preset.", @@ -366,6 +366,12 @@ "Av1Settings$BitDepth": "Specify the Bit depth. You can choose 8-bit or 10-bit." } }, + "Av1FilmGrainSynthesis": { + "base": "Film grain synthesis replaces film grain present in your content with similar quality synthesized AV1 film grain. We recommend that you choose Enabled to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. For QVBR quality level 9 or 10 outputs we recommend that you keep the default value, Disabled. When you include Film grain synthesis, you cannot include the Noise reducer preprocessor.", + "refs": { + "Av1Settings$FilmGrainSynthesis": "Film grain synthesis replaces film grain present in your content with similar quality synthesized AV1 film grain. We recommend that you choose Enabled to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. For QVBR quality level 9 or 10 outputs we recommend that you keep the default value, Disabled. When you include Film grain synthesis, you cannot include the Noise reducer preprocessor." + } + }, "Av1FramerateControl": { "base": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { @@ -1549,6 +1555,12 @@ "FileSourceSettings$TimeDeltaUnits": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default." 
} }, + "FlacSettings": { + "base": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC.", + "refs": { + "AudioCodecSettings$FlacSettings": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC." + } + }, "FontScript": { "base": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset.", "refs": { @@ -3338,6 +3350,12 @@ "S3EncryptionSettings$EncryptionType": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3. If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN." } }, + "S3StorageClass": { + "base": "Specify the S3 storage class to use for this destination.", + "refs": { + "S3DestinationSettings$StorageClass": "Specify the S3 storage class to use for this destination." + } + }, "SampleRangeConversion": { "base": "Specify how MediaConvert limits the color sample range for this output. To create a limited range output from a full range input: Choose Limited range squeeze. For full range inputs, MediaConvert performs a linear offset to color samples equally across all pixels and frames. Color samples in 10-bit outputs are limited to 64 through 940, and 8-bit outputs are limited to 16 through 235. 
Note: For limited range inputs, values for color samples are passed through to your output unchanged. MediaConvert does not limit the sample range. To correct pixels in your input that are out of range or out of gamut: Choose Limited range clip. Use for broadcast applications. MediaConvert conforms any pixels outside of the values that you specify under Minimum YUV and Maximum YUV to limited range bounds. MediaConvert also corrects any YUV values that, when converted to RGB, would be outside the bounds you specify under Minimum RGB tolerance and Maximum RGB tolerance. With either limited range conversion, MediaConvert writes the sample range metadata in the output.", "refs": { @@ -3495,6 +3513,13 @@ "CaptionSourceSettings$TrackSourceSettings": "Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings." } }, + "TsPtsOffset": { + "base": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset.", + "refs": { + "M2tsSettings$PtsOffsetMode": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. 
The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset.", + "M3u8Settings$PtsOffsetMode": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset." + } + }, "TtmlDestinationSettings": { "base": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.", "refs": { @@ -4189,7 +4214,9 @@ "__integerMin0Max3600": { "base": null, "refs": { - "HlsGroupSettings$ProgramDateTimePeriod": "Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds." + "HlsGroupSettings$ProgramDateTimePeriod": "Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds.", + "M2tsSettings$PtsOffset": "Manually specify the initial PTS offset, in seconds, when you set PTS offset to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default value 2.", + "M3u8Settings$PtsOffset": "Manually specify the initial PTS offset, in seconds, when you set PTS offset to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default value 2." 
} }, "__integerMin0Max4": { @@ -4302,8 +4329,8 @@ "__integerMin100000Max100000000": { "base": null, "refs": { - "AutomatedAbrSettings$MaxAbrBitrate": "Optional. The maximum target bit rate used in your automated ABR stack. Use this value to set an upper limit on the bandwidth consumed by the highest-quality rendition. This is the rendition that is delivered to viewers with the fastest internet connections. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default.", - "AutomatedAbrSettings$MinAbrBitrate": "Optional. The minimum target bitrate used in your automated ABR stack. Use this value to set a lower limit on the bitrate of video delivered to viewers with slow internet connections. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default." + "AutomatedAbrSettings$MaxAbrBitrate": "Specify the maximum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default. The average bitrate of your highest-quality rendition will be equal to or below this value, depending on the quality, complexity, and resolution of your content. Note that the instantaneous maximum bitrate may vary above the value that you specify.", + "AutomatedAbrSettings$MinAbrBitrate": "Specify the minimum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default. The average bitrate of your lowest-quality rendition will be near this value. Note that the instantaneous minimum bitrate may vary below the value that you specify." 
} }, "__integerMin1000Max1152000000": { @@ -4379,6 +4406,7 @@ "base": null, "refs": { "AiffSettings$BitDepth": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track.", + "FlacSettings$BitDepth": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track.", "WavSettings$BitDepth": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track." } }, @@ -4565,9 +4593,16 @@ "WavSettings$Channels": "Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64." } }, + "__integerMin1Max8": { + "base": null, + "refs": { + "FlacSettings$Channels": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are between 1 and 8." + } + }, "__integerMin22050Max48000": { "base": null, "refs": { + "FlacSettings$SampleRate": "Sample rate in hz.", "Mp3Settings$SampleRate": "Sample rate in hz.", "VorbisSettings$SampleRate": "Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000, 44100, and 48000. The default value is 48000." 
} diff --git a/models/apis/mediaconvert/2017-08-29/endpoint-rule-set-1.json b/models/apis/mediaconvert/2017-08-29/endpoint-rule-set-1.json index dcc0061568b..b3cfd3d2ad8 100644 --- a/models/apis/mediaconvert/2017-08-29/endpoint-rule-set-1.json +++ b/models/apis/mediaconvert/2017-08-29/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,175 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - 
"argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://mediaconvert.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "PartitionResult" }, - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": 
"https://mediaconvert-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -304,91 +225,134 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://mediaconvert.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], - "type": "tree", - "rules": [ + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": 
[ { - "ref": "Region" + "ref": "PartitionResult" }, - "cn-northwest-1" + "supportsDualStack" ] } - ], - "endpoint": { - "url": "https://subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://mediaconvert.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://mediaconvert.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "cn-northwest-1" + ] + } + ], + "endpoint": { + "url": "https://subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://mediaconvert.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/medialive/2017-10-14/api-2.json b/models/apis/medialive/2017-10-14/api-2.json index 19c853c7ee0..aa1a343d274 100644 --- a/models/apis/medialive/2017-10-14/api-2.json +++ b/models/apis/medialive/2017-10-14/api-2.json @@ -2443,6 +2443,13 @@ "MEDIUM_LOW" ] }, + "Ac3AttenuationControl": { + "type": "string", + "enum": [ + "ATTENUATE_3_DB", + "NONE" + ] + }, "Ac3BitstreamMode": { "type": "string", "enum": [ @@ -2516,6 +2523,10 @@ "MetadataControl": { "shape": "Ac3MetadataControl", "locationName": "metadataControl" + }, + "AttenuationControl": { + "shape": "Ac3AttenuationControl", + "locationName": 
"attenuationControl" } } }, @@ -7896,6 +7907,14 @@ "members": { } }, + "IncludeFillerNalUnits": { + "type": "string", + "enum": [ + "AUTO", + "DROP", + "INCLUDE" + ] + }, "Input": { "type": "structure", "members": { @@ -9715,6 +9734,13 @@ "PASSTHROUGH" ] }, + "M3u8KlvBehavior": { + "type": "string", + "enum": [ + "NO_PASSTHROUGH", + "PASSTHROUGH" + ] + }, "M3u8NielsenId3Behavior": { "type": "string", "enum": [ @@ -9806,6 +9832,14 @@ "VideoPid": { "shape": "__string", "locationName": "videoPid" + }, + "KlvBehavior": { + "shape": "M3u8KlvBehavior", + "locationName": "klvBehavior" + }, + "KlvDataPids": { + "shape": "__string", + "locationName": "klvDataPids" } } }, @@ -11539,6 +11573,10 @@ "RestartDelay": { "shape": "__integerMin0", "locationName": "restartDelay" + }, + "IncludeFillerNalUnits": { + "shape": "IncludeFillerNalUnits", + "locationName": "includeFillerNalUnits" } } }, diff --git a/models/apis/medialive/2017-10-14/docs-2.json b/models/apis/medialive/2017-10-14/docs-2.json index 166b39ff314..b365c325680 100644 --- a/models/apis/medialive/2017-10-14/docs-2.json +++ b/models/apis/medialive/2017-10-14/docs-2.json @@ -114,6 +114,12 @@ "AacSettings$VbrQuality": "VBR Quality Level - Only used if rateControlMode is VBR." } }, + "Ac3AttenuationControl": { + "base": "Ac3 Attenuation Control", + "refs": { + "Ac3Settings$AttenuationControl": "Applies a 3 dB attenuation to the surround channels. Applies only when the coding mode parameter is CODING_MODE_3_2_LFE." + } + }, "Ac3BitstreamMode": { "base": "Ac3 Bitstream Mode", "refs": { @@ -1683,6 +1689,12 @@ "ScheduleActionStartSettings$ImmediateModeScheduleActionStartSettings": "Option for specifying an action that should be applied immediately." } }, + "IncludeFillerNalUnits": { + "base": "Include Filler Nal Units", + "refs": { + "RtmpGroupSettings$IncludeFillerNalUnits": "Applies only when the rate control mode (in the codec settings) is CBR (constant bit rate). 
Controls whether the RTMP output stream is padded (with FILL NAL units) in order to achieve a constant bit rate that is truly constant. When there is no padding, the bandwidth varies (up to the bitrate value in the codec settings). We recommend that you choose Auto." + } + }, "Input": { "base": null, "refs": { @@ -2258,6 +2270,12 @@ "M2tsSettings$TimedMetadataBehavior": "When set to passthrough, timed metadata will be passed through from input to output." } }, + "M3u8KlvBehavior": { + "base": "M3u8 Klv Behavior", + "refs": { + "M3u8Settings$KlvBehavior": "If set to passthrough, passes any KLV data from the input source to this output." + } + }, "M3u8NielsenId3Behavior": { "base": "M3u8 Nielsen Id3 Behavior", "refs": { @@ -4676,6 +4694,7 @@ "M2tsSettings$VideoPid": "Packet Identifier (PID) of the elementary video stream in the transport stream. Can be entered as a decimal or hexadecimal value. Valid values are 32 (or 0x20)..8182 (or 0x1ff6).", "M3u8Settings$AudioPids": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values.", "M3u8Settings$EcmPid": "This parameter is unused and deprecated.", + "M3u8Settings$KlvDataPids": "Packet Identifier (PID) for input source KLV data to this output. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values. Each PID specified must be in the range of 32 (or 0x20)..8182 (or 0x1ff6).", "M3u8Settings$PcrPid": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID. Can be entered as a decimal or hexadecimal value.", "M3u8Settings$PmtPid": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream. 
Can be entered as a decimal or hexadecimal value.", "M3u8Settings$Scte35Pid": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can be entered as a decimal or hexadecimal value.", diff --git a/models/apis/medialive/2017-10-14/endpoint-rule-set-1.json b/models/apis/medialive/2017-10-14/endpoint-rule-set-1.json index a354a5ca273..93e47c55b38 100644 --- a/models/apis/medialive/2017-10-14/endpoint-rule-set-1.json +++ b/models/apis/medialive/2017-10-14/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true 
+ "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://medialive-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://medialive-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": 
{ - "url": "https://medialive-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://medialive-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://medialive.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://medialive.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://medialive.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": 
"error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://medialive.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/mediatailor/2018-04-23/api-2.json b/models/apis/mediatailor/2018-04-23/api-2.json index c2974c923af..7bcb802d8b7 100644 --- a/models/apis/mediatailor/2018-04-23/api-2.json +++ b/models/apis/mediatailor/2018-04-23/api-2.json @@ -498,7 +498,8 @@ "type":"string", "enum":[ "S3_SIGV4", - "SECRETS_MANAGER_ACCESS_TOKEN" + "SECRETS_MANAGER_ACCESS_TOKEN", + "AUTODETECT_SIGV4" ] }, "AdBreak":{ diff --git a/models/apis/mediatailor/2018-04-23/docs-2.json b/models/apis/mediatailor/2018-04-23/docs-2.json index eb6a3372116..c59fe13567b 100644 --- a/models/apis/mediatailor/2018-04-23/docs-2.json +++ b/models/apis/mediatailor/2018-04-23/docs-2.json @@ -62,7 +62,7 @@ "AccessType": { "base": null, "refs": { - "AccessConfiguration$AccessType": "

The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location. Accepted value: S3_SIGV4.

S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

Before you can use S3_SIGV4, you must meet these requirements:

• You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

• The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

• The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

" + "AccessConfiguration$AccessType": "

The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location.

S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

Before you can use S3_SIGV4, you must meet these requirements:

• You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

• The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

• The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

AUTODETECT_SIGV4 - AWS Signature Version 4 authentication for a set of supported services: MediaPackage Version 2 and Amazon S3 hosted virtual-style access. If your source location base URL is a MediaPackage Version 2 endpoint or an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the resource where your source content is stored.

Before you can use AUTODETECT_SIGV4 with a MediaPackage Version 2 endpoint, you must meet these requirements:

• You must grant MediaTailor access to your MediaPackage endpoint by granting mediatailor.amazonaws.com principal access in an Origin Access policy on the endpoint.

• Your MediaTailor source location base URL must be a MediaPackage V2 endpoint.

• The caller of the API must have mediapackagev2:GetObject IAM permissions to read all top level manifests referenced by the MediaTailor source packaging configurations.

Before you can use AUTODETECT_SIGV4 with an Amazon S3 bucket, you must meet these requirements:

• You must grant MediaTailor access to your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For more information about configuring access in IAM, see Access management in the IAM User Guide..

• The mediatailor.amazonaws.com service principal must have permissions to read all top-level manifests referenced by the VodSource packaging configurations.

• The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

" } }, "AdBreak": { diff --git a/models/apis/mediatailor/2018-04-23/endpoint-rule-set-1.json b/models/apis/mediatailor/2018-04-23/endpoint-rule-set-1.json index 03d6980c4d0..f6487bddd7f 100644 --- a/models/apis/mediatailor/2018-04-23/endpoint-rule-set-1.json +++ b/models/apis/mediatailor/2018-04-23/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - 
true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": 
"PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://api.mediatailor.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.mediatailor.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://api.mediatailor.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index 7cd2a429103..0040f6ab19f 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -1166,6 +1166,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ] @@ -1183,6 +1184,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ] @@ -2981,6 +2983,11 @@ "min":1, "pattern":"[\\w\\-]+|(\\$LATEST)|(\\$PUBLISHED)" }, + "AllSheetsFilterScopeConfiguration":{ + "type":"structure", + "members":{ + } + }, "AmazonElasticsearchParameters":{ "type":"structure", "required":["Domain"], @@ -5220,7 +5227,8 @@ "FolderType":{"shape":"FolderType"}, "ParentFolderArn":{"shape":"Arn"}, "Permissions":{"shape":"ResourcePermissionList"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "SharingModel":{"shape":"SharingModel"} } }, "CreateFolderResponse":{ @@ -8425,6 +8433,22 @@ "shape":"RestrictiveResourceId", "location":"uri", "locationName":"FolderId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"querystring", + "locationName":"namespace" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + 
"shape":"String", + "location":"querystring", + "locationName":"next-token" } } }, @@ -8438,7 +8462,8 @@ "FolderId":{"shape":"RestrictiveResourceId"}, "Arn":{"shape":"Arn"}, "Permissions":{"shape":"ResourcePermissionList"}, - "RequestId":{"shape":"String"} + "RequestId":{"shape":"String"}, + "NextToken":{"shape":"String"} } }, "DescribeFolderRequest":{ @@ -8476,6 +8501,22 @@ "shape":"RestrictiveResourceId", "location":"uri", "locationName":"FolderId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"querystring", + "locationName":"namespace" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" } } }, @@ -8489,7 +8530,8 @@ "FolderId":{"shape":"RestrictiveResourceId"}, "Arn":{"shape":"Arn"}, "Permissions":{"shape":"ResourcePermissionList"}, - "RequestId":{"shape":"String"} + "RequestId":{"shape":"String"}, + "NextToken":{"shape":"String"} } }, "DescribeFolderResponse":{ @@ -9823,7 +9865,8 @@ "FilterScopeConfiguration":{ "type":"structure", "members":{ - "SelectedSheets":{"shape":"SelectedSheetsFilterScopeConfiguration"} + "SelectedSheets":{"shape":"SelectedSheetsFilterScopeConfiguration"}, + "AllSheets":{"shape":"AllSheetsFilterScopeConfiguration"} } }, "FilterSelectableValues":{ @@ -9903,7 +9946,8 @@ "FolderType":{"shape":"FolderType"}, "FolderPath":{"shape":"Path"}, "CreatedTime":{"shape":"Timestamp"}, - "LastUpdatedTime":{"shape":"Timestamp"} + "LastUpdatedTime":{"shape":"Timestamp"}, + "SharingModel":{"shape":"SharingModel"} } }, "FolderColumnList":{ @@ -9961,7 +10005,8 @@ "Name":{"shape":"FolderName"}, "FolderType":{"shape":"FolderType"}, "CreatedTime":{"shape":"Timestamp"}, - "LastUpdatedTime":{"shape":"Timestamp"} + "LastUpdatedTime":{"shape":"Timestamp"}, + "SharingModel":{"shape":"SharingModel"} } }, "FolderSummaryList":{ @@ -10029,10 +10074,7 @@ }, "ForecastComputation":{ 
"type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, @@ -10804,10 +10846,7 @@ }, "GrowthRateComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, @@ -13008,7 +13047,6 @@ "type":"structure", "required":[ "ComputationId", - "Time", "Type" ], "members":{ @@ -13057,12 +13095,7 @@ }, "MetricComparisonComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time", - "FromValue", - "TargetValue" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, @@ -13818,10 +13851,7 @@ }, "PeriodOverPeriodComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, @@ -13831,10 +13861,7 @@ }, "PeriodToDateComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, @@ -15751,6 +15778,13 @@ "BackgroundColor":{"shape":"ConditionalFormattingColor"} } }, + "SharingModel":{ + "type":"string", + "enum":[ + "ACCOUNT", + "NAMESPACE" + ] + }, "Sheet":{ "type":"structure", "members":{ @@ -16100,7 +16134,8 @@ "type":"string", "enum":[ "CSV", - "PDF" + "PDF", + "EXCEL" ] }, "SnapshotFileGroup":{ @@ -16112,7 +16147,7 @@ "SnapshotFileGroupList":{ "type":"list", "member":{"shape":"SnapshotFileGroup"}, - "max":6, + "max":7, "min":1 }, "SnapshotFileList":{ @@ -16136,7 +16171,7 @@ "SnapshotFileSheetSelectionList":{ "type":"list", "member":{"shape":"SnapshotFileSheetSelection"}, - "max":1, + "max":5, "min":1 }, 
"SnapshotFileSheetSelectionScope":{ @@ -16149,7 +16184,7 @@ "SnapshotFileSheetSelectionVisualIdList":{ "type":"list", "member":{"shape":"ShortRestrictiveResourceId"}, - "max":1, + "max":5, "min":1 }, "SnapshotJobErrorInfo":{ @@ -16719,9 +16754,15 @@ "type":"structure", "members":{ "SelectedFieldOptions":{"shape":"TableFieldOptionList"}, - "Order":{"shape":"FieldOrderList"} + "Order":{"shape":"FieldOrderList"}, + "PinnedFieldOptions":{"shape":"TablePinnedFieldOptions"} } }, + "TableFieldOrderList":{ + "type":"list", + "member":{"shape":"FieldId"}, + "max":201 + }, "TableFieldURLConfiguration":{ "type":"structure", "members":{ @@ -16770,6 +16811,12 @@ "OverflowColumnHeaderVisibility":{"shape":"Visibility"} } }, + "TablePinnedFieldOptions":{ + "type":"structure", + "members":{ + "PinnedLeftFields":{"shape":"TableFieldOrderList"} + } + }, "TableRowConditionalFormatting":{ "type":"structure", "members":{ @@ -17413,8 +17460,6 @@ "type":"structure", "required":[ "ComputationId", - "Time", - "Category", "Type" ], "members":{ @@ -17437,7 +17482,6 @@ "type":"structure", "required":[ "ComputationId", - "Category", "Type" ], "members":{ @@ -17733,10 +17777,7 @@ }, "TotalAggregationComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Value" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, @@ -17925,10 +17966,7 @@ }, "UniqueValuesComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Category" - ], + "required":["ComputationId"], "members":{ "ComputationId":{"shape":"ShortRestrictiveResourceId"}, "Name":{"shape":"String"}, diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index 418d7ad0249..69222950048 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -131,7 +131,7 @@ "SearchGroups": "

Use the SearchGroups operation to search groups in a specified Amazon QuickSight namespace using the supplied filters.

", "StartAssetBundleExportJob": "

Starts an Asset Bundle export job.

An Asset Bundle export job exports specified Amazon QuickSight assets. You can also choose to export any asset dependencies in the same job. Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 5 export jobs concurrently.

The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.

", "StartAssetBundleImportJob": "

Starts an Asset Bundle import job.

An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently.

The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.

", - "StartDashboardSnapshotJob": "

Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

", + "StartDashboardSnapshotJob": "

Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

", "TagResource": "

Assigns one or more tags (key-value pairs) to the specified Amazon QuickSight resource.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource. Amazon QuickSight supports tagging on data set, data source, dashboard, template, and topic.

Tagging for Amazon QuickSight works in a similar way to tagging for other Amazon Web Services services, except for the following:

", "UntagResource": "

Removes a tag or tags from a resource.

", "UpdateAccountCustomization": "

Updates Amazon QuickSight customizations for the current Amazon Web Services Region. Currently, the only customization that you can use is a theme.

You can use customizations for your Amazon Web Services account or, if you specify a namespace, for a Amazon QuickSight namespace instead. Customizations that apply to a namespace override customizations that apply to an Amazon Web Services account. To find out which customizations apply, use the DescribeAccountCustomization API operation.

", @@ -279,6 +279,12 @@ "UpdateThemeAliasRequest$AliasName": "

The name of the theme alias that you want to update.

" } }, + "AllSheetsFilterScopeConfiguration": { + "base": "

The configuration for applying a filter to all sheets. You can apply this filter to all visuals on every sheet.

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

", + "refs": { + "FilterScopeConfiguration$AllSheets": "

The configuration for applying a filter to all sheets.

" + } + }, "AmazonElasticsearchParameters": { "base": "

The parameters for OpenSearch.

", "refs": { @@ -4515,6 +4521,7 @@ "SelectedFieldList$member": null, "TableCellConditionalFormatting$FieldId": "

The field ID of the cell for conditional formatting.

", "TableFieldOption$FieldId": "

The field ID for a table field.

", + "TableFieldOrderList$member": null, "UnaggregatedField$FieldId": "

The custom field ID.

" } }, @@ -4527,7 +4534,7 @@ "FieldOrderList": { "base": null, "refs": { - "TableFieldOptions$Order": "

The order of field IDs of the field options for a table visual.

" + "TableFieldOptions$Order": "

The order of the field IDs that are configured as field options for a table visual.

" } }, "FieldSeriesItem": { @@ -6741,6 +6748,8 @@ "MaxResults": { "base": null, "refs": { + "DescribeFolderPermissionsRequest$MaxResults": "

The maximum number of results to be returned per request.

", + "DescribeFolderResolvedPermissionsRequest$MaxResults": "

The maximum number of results to be returned per request.

", "ListAnalysesRequest$MaxResults": "

The maximum number of results to return.

", "ListAssetBundleExportJobsRequest$MaxResults": "

The maximum number of results to be returned per request.

", "ListAssetBundleImportJobsRequest$MaxResults": "

The maximum number of results to be returned per request.

", @@ -6847,8 +6856,8 @@ "MemberType": { "base": null, "refs": { - "CreateFolderMembershipRequest$MemberType": "

The type of the member, including DASHBOARD, ANALYSIS, and DATASET.

", - "DeleteFolderMembershipRequest$MemberType": "

The type of the member, including DASHBOARD, ANALYSIS, and DATASET

", + "CreateFolderMembershipRequest$MemberType": "

The member type of the asset that you want to add to a folder.

", + "DeleteFolderMembershipRequest$MemberType": "

The member type of the asset that you want to delete from a folder.

", "FolderMember$MemberType": "

The type of asset that it is.

" } }, @@ -6945,6 +6954,8 @@ "DeleteUserRequest$Namespace": "

The namespace. Currently, you should set this to default.

", "DescribeAccountCustomizationRequest$Namespace": "

The Amazon QuickSight namespace that you want to describe Amazon QuickSight customizations for.

", "DescribeAccountCustomizationResponse$Namespace": "

The Amazon QuickSight namespace that you're describing.

", + "DescribeFolderPermissionsRequest$Namespace": "

The namespace of the folder whose permissions you want described.

", + "DescribeFolderResolvedPermissionsRequest$Namespace": "

The namespace of the folder whose permissions you want described.

", "DescribeGroupMembershipRequest$Namespace": "

The namespace that includes the group you are searching within.

", "DescribeGroupRequest$Namespace": "

The namespace of the group that you want described.

", "DescribeIAMPolicyAssignmentRequest$Namespace": "

The namespace that contains the assignment.

", @@ -8459,11 +8470,11 @@ "refs": { "AnonymousUserQSearchBarEmbeddingConfiguration$InitialTopicId": "

The QuickSight Q topic ID of the topic that you want the anonymous user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders the Q search bar with this topic pre-selected.

The Amazon Resource Name (ARN) of this Q topic must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

", "CreateFolderMembershipRequest$FolderId": "

The ID of the folder.

", - "CreateFolderMembershipRequest$MemberId": "

The ID of the asset (the dashboard, analysis, or dataset).

", + "CreateFolderMembershipRequest$MemberId": "

The ID of the asset that you want to add to the folder.

", "CreateFolderRequest$FolderId": "

The ID of the folder.

", "CreateFolderResponse$FolderId": "

The folder ID for the newly created folder.

", "DeleteFolderMembershipRequest$FolderId": "

The Folder ID.

", - "DeleteFolderMembershipRequest$MemberId": "

The ID of the asset (the dashboard, analysis, or dataset) that you want to delete.

", + "DeleteFolderMembershipRequest$MemberId": "

The ID of the asset that you want to delete.

", "DeleteFolderRequest$FolderId": "

The ID of the folder.

", "DeleteFolderResponse$FolderId": "

The ID of the folder.

", "DescribeFolderPermissionsRequest$FolderId": "

The ID of the folder.

", @@ -9071,6 +9082,14 @@ "FilledMapShapeConditionalFormatting$Format": "

The conditional formatting that determines the background color of a filled map's shape.

" } }, + "SharingModel": { + "base": null, + "refs": { + "CreateFolderRequest$SharingModel": "

An optional parameter that determines the sharing scope of the folder. The default value for this parameter is ACCOUNT.

", + "Folder$SharingModel": "

The sharing scope of the folder.

", + "FolderSummary$SharingModel": "

The sharing scope of the folder.

" + } + }, "Sheet": { "base": "

A sheet, which is an object that contains a set of visuals that are viewed together on one page in Amazon QuickSight. Every analysis and dashboard contains at least one sheet. Each sheet contains at least one visualization widget, for example a chart, pivot table, or narrative insight. Sheets can be associated with other components, such as controls, filters, and so on.

", "refs": { @@ -9414,7 +9433,7 @@ "SheetDefinition$SheetId": "

The unique identifier of a sheet.

", "SheetTextBox$SheetTextBoxId": "

The unique identifier for a text box. This identifier must be unique within the context of a dashboard, template, or analysis. Two dashboards, analyses, or templates can have text boxes that share identifiers.

", "SheetVisualScopingConfiguration$SheetId": "

The selected sheet that the filter is applied to.

", - "SnapshotFileSheetSelection$SheetId": "

The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV and PDF format types.

", + "SnapshotFileSheetSelection$SheetId": "

The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV, Excel, and PDF format types.

", "SnapshotFileSheetSelectionVisualIdList$member": null, "StartAssetBundleExportJobRequest$AssetBundleExportJobId": "

The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

", "StartAssetBundleExportJobResponse$AssetBundleExportJobId": "

The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

", @@ -9589,7 +9608,7 @@ "SnapshotFileFormatType": { "base": null, "refs": { - "SnapshotFile$FormatType": "

The format of the snapshot file to be generated. You can choose between CSV or PDF.

" + "SnapshotFile$FormatType": "

The format of the snapshot file to be generated. You can choose from CSV, Excel, or PDF.

" } }, "SnapshotFileGroup": { @@ -9620,19 +9639,19 @@ "SnapshotFileSheetSelectionList": { "base": null, "refs": { - "SnapshotFile$SheetSelections": "

A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations or 1 configuration for PDF.

" + "SnapshotFile$SheetSelections": "

A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations, 5 Excel configurations, or 1 configuration for PDF.

" } }, "SnapshotFileSheetSelectionScope": { "base": null, "refs": { - "SnapshotFileSheetSelection$SelectionScope": "

The selection scope of the visuals on a sheet of a dashboard that you are generating a snapthot of. You can choose one of the following options.

" + "SnapshotFileSheetSelection$SelectionScope": "

The selection scope of the visuals on a sheet of a dashboard that you are generating a snapshot of. You can choose one of the following options.

" } }, "SnapshotFileSheetSelectionVisualIdList": { "base": null, "refs": { - "SnapshotFileSheetSelection$VisualIds": "

A list of visual IDs that are located in the selected sheet. This structure supports tables and pivot tables. This structure is required if you are generating a CSV. You can add a maximum of 1 visual ID to this structure.

" + "SnapshotFileSheetSelection$VisualIds": "

A structure that lists the IDs of the visuals in the selected sheet. Supported visual types are table and pivot table visuals. This value is required if you are generating a CSV or Excel workbook. This value supports a maximum of 1 visual ID for CSV and 5 visual IDs across up to 5 sheet selections for Excel. If you are generating an Excel workbook, the order of the visual IDs provided in this structure determines the order of the worksheets in the Excel file.

" } }, "SnapshotJobErrorInfo": { @@ -10100,8 +10119,12 @@ "DescribeDataSetResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDataSourcePermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeDataSourceResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DescribeFolderPermissionsRequest$NextToken": "

A pagination token for the next set of results.

", "DescribeFolderPermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DescribeFolderPermissionsResponse$NextToken": "

The pagination token for the next set of results, or null if there are no more results.

", + "DescribeFolderResolvedPermissionsRequest$NextToken": "

A pagination token for the next set of results.

", "DescribeFolderResolvedPermissionsResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", + "DescribeFolderResolvedPermissionsResponse$NextToken": "

A pagination token for the next set of results, or null if there are no more results.

", "DescribeFolderResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeGroupMembershipResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", "DescribeGroupResponse$RequestId": "

The Amazon Web Services request ID for this operation.

", @@ -10634,15 +10657,21 @@ "TableFieldOptionList": { "base": null, "refs": { - "TableFieldOptions$SelectedFieldOptions": "

The selected field options for the table field options.

" + "TableFieldOptions$SelectedFieldOptions": "

The field options to be configured to a table.

" } }, "TableFieldOptions": { - "base": "

The field options for a table visual.

", + "base": "

The field options of a table visual.

", "refs": { "TableConfiguration$FieldOptions": "

The field options for a table visual.

" } }, + "TableFieldOrderList": { + "base": "

A list of table field IDs.

", + "refs": { + "TablePinnedFieldOptions$PinnedLeftFields": "

A list of columns to be pinned to the left of a table visual.

" + } + }, "TableFieldURLConfiguration": { "base": "

The URL configuration for a table field.

", "refs": { @@ -10685,6 +10714,12 @@ "TableConfiguration$PaginatedReportOptions": "

The paginated report options for a table visual.

" } }, + "TablePinnedFieldOptions": { + "base": "

The settings for the pinned columns of a table visual.

", + "refs": { + "TableFieldOptions$PinnedFieldOptions": "

The settings for the pinned columns of a table visual.

" + } + }, "TableRowConditionalFormatting": { "base": "

The conditional formatting of a table row.

", "refs": { diff --git a/models/apis/quicksight/2018-04-01/paginators-1.json b/models/apis/quicksight/2018-04-01/paginators-1.json index e5ce3441c3c..512e1e44b04 100644 --- a/models/apis/quicksight/2018-04-01/paginators-1.json +++ b/models/apis/quicksight/2018-04-01/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "DescribeFolderPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Permissions" + }, + "DescribeFolderResolvedPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Permissions" + }, "ListAnalyses": { "input_token": "NextToken", "output_token": "NextToken", @@ -42,6 +54,18 @@ "limit_key": "MaxResults", "result_key": "DataSources" }, + "ListFolderMembers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderMemberList" + }, + "ListFolders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderSummaryList" + }, "ListGroupMemberships": { "input_token": "NextToken", "output_token": "NextToken", @@ -154,6 +178,12 @@ "limit_key": "MaxResults", "result_key": "DataSourceSummaries" }, + "SearchFolders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderSummaryList" + }, "SearchGroups": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 92d5c1e2a9f..58a172da29d 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -4830,7 +4830,7 @@ "RestoreDBClusterFromS3Message$DBClusterParameterGroupName": "

The name of the DB cluster parameter group to associate with the restored DB cluster. If this argument is omitted, the default parameter group for the engine version is used.

Constraints:

", "RestoreDBClusterFromS3Message$DBSubnetGroupName": "

A DB subnet group to associate with the restored DB cluster.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mydbsubnetgroup

", "RestoreDBClusterFromS3Message$Engine": "

The name of the database engine to be used for this DB cluster.

Valid Values: aurora-mysql (for Aurora MySQL)

", - "RestoreDBClusterFromS3Message$EngineVersion": "

The version number of the database engine to use.

To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Examples: 5.7.mysql_aurora.2.07.1, 8.0.mysql_aurora.3.02.0

", + "RestoreDBClusterFromS3Message$EngineVersion": "

The version number of the database engine to use.

To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Examples: 5.7.mysql_aurora.2.12.0, 8.0.mysql_aurora.3.04.0

", "RestoreDBClusterFromS3Message$MasterUsername": "

The name of the master user for the restored DB cluster.

Constraints:

", "RestoreDBClusterFromS3Message$MasterUserPassword": "

The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Constraints:

", "RestoreDBClusterFromS3Message$OptionGroupName": "

A value that indicates that the restored DB cluster should be associated with the specified option group.

Permanent options can't be removed from an option group. An option group can't be removed from a DB cluster once it is associated with a DB cluster.

", @@ -4838,7 +4838,7 @@ "RestoreDBClusterFromS3Message$PreferredMaintenanceWindow": "

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Aurora User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

", "RestoreDBClusterFromS3Message$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted DB cluster.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

If the StorageEncrypted parameter is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

", "RestoreDBClusterFromS3Message$SourceEngine": "

The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.

Valid values: mysql

", - "RestoreDBClusterFromS3Message$SourceEngineVersion": "

The version of the database that the backup files were created from.

MySQL versions 5.5, 5.6, and 5.7 are supported.

Example: 5.6.40, 5.7.28

", + "RestoreDBClusterFromS3Message$SourceEngineVersion": "

The version of the database that the backup files were created from.

MySQL versions 5.7 and 8.0 are supported.

Example: 5.7.40, 8.0.28

", "RestoreDBClusterFromS3Message$S3BucketName": "

The name of the Amazon S3 bucket that contains the data used to create the Amazon Aurora DB cluster.

", "RestoreDBClusterFromS3Message$S3Prefix": "

The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster. If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket.

", "RestoreDBClusterFromS3Message$S3IngestionRoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that authorizes Amazon RDS to access the Amazon S3 bucket on your behalf.

", diff --git a/models/apis/rds/2014-10-31/paginators-1.json b/models/apis/rds/2014-10-31/paginators-1.json index b6db47f81c0..41d01c2eb56 100644 --- a/models/apis/rds/2014-10-31/paginators-1.json +++ b/models/apis/rds/2014-10-31/paginators-1.json @@ -12,6 +12,12 @@ "output_token": "Marker", "result_key": "Certificates" }, + "DescribeDBClusterAutomatedBackups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterAutomatedBackups" + }, "DescribeDBClusterBacktracks": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/models/apis/s3/2006-03-01/endpoint-tests-1.json b/models/apis/s3/2006-03-01/endpoint-tests-1.json index cad90f95e8f..4b528a224f5 100644 --- a/models/apis/s3/2006-03-01/endpoint-tests-1.json +++ b/models/apis/s3/2006-03-01/endpoint-tests-1.json @@ -303,7 +303,6 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": false } @@ -940,10 +939,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -980,10 +977,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-east-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -1022,10 +1017,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-east-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3842,8 +3835,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3882,8 +3874,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } 
}, { @@ -3923,8 +3914,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3963,8 +3953,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4003,8 +3992,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4044,8 +4032,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4073,8 +4060,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4112,8 +4098,7 @@ "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4152,8 +4137,7 @@ "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4219,8 +4203,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4259,8 +4242,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4300,8 +4282,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4340,8 +4321,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4380,8 +4360,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4421,8 +4400,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": true, - 
"___key": "key" + "UseFIPS": true } }, { @@ -4450,8 +4428,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4490,8 +4467,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4558,8 +4534,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4599,8 +4574,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4626,10 +4600,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4668,8 +4640,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4706,8 +4677,7 @@ "Bucket": "99a_b", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4746,8 +4716,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4803,8 +4772,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4844,8 +4812,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4871,10 +4838,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + 
"UseFIPS": false } }, { @@ -4913,8 +4878,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4951,8 +4915,7 @@ "Bucket": "99a_b", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4991,8 +4954,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5060,8 +5022,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5101,8 +5062,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5128,10 +5088,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5170,8 +5128,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5208,8 +5165,7 @@ "Bucket": "99a_b", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5249,8 +5205,7 @@ "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5291,8 +5246,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5321,8 +5275,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, 
- "___key": "key" + "UseFIPS": true } }, { @@ -5351,8 +5304,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5381,8 +5333,7 @@ "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5421,10 +5372,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5464,8 +5413,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5506,8 +5454,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5550,8 +5497,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5605,10 +5551,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5648,8 +5592,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5690,8 +5633,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { 
@@ -5720,8 +5662,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5750,8 +5691,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5780,8 +5720,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5820,10 +5759,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5860,10 +5797,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5901,10 +5836,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5930,10 +5863,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5972,10 +5903,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6012,10 +5941,8 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", 
"ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6028,7 +5955,6 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": true } @@ -6056,10 +5982,8 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6072,7 +5996,6 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": true } @@ -6111,10 +6034,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6152,10 +6073,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6181,10 +6100,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6223,10 +6140,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6329,10 +6244,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - 
"RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6361,10 +6274,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6403,10 +6314,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6443,10 +6352,8 @@ "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6473,10 +6380,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -7224,10 +7129,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 51762e4436b..66619afe495 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -84,13 +84,10 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket", - "CreateBucketConfiguration": { - "LocationConstraint": "eu-west-1" - } + "Bucket": "examplebucket" }, "output": { - "Location": "http://examplebucket..s3.amazonaws.com/" + "Location": "/examplebucket" }, "comments": { "input": { @@ -98,16 +95,19 @@ "output": { } }, - "description": "The following example creates a bucket. 
The request specifies an AWS region where to create the bucket.", - "id": "to-create-a-bucket-in-a-specific-region-1483399072992", - "title": "To create a bucket in a specific region" + "description": "The following example creates a bucket.", + "id": "to-create-a-bucket--1472851826060", + "title": "To create a bucket " }, { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket..s3.amazonaws.com/" }, "comments": { "input": { @@ -115,9 +115,9 @@ "output": { } }, - "description": "The following example creates a bucket.", - "id": "to-create-a-bucket--1472851826060", - "title": "To create a bucket " + "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.", + "id": "to-create-a-bucket-in-a-specific-region-1483399072992", + "title": "To create a bucket in a specific region" } ], "CreateMultipartUpload": [ @@ -257,8 +257,10 @@ "DeleteObject": [ { "input": { - "Bucket": "ExampleBucket", - "Key": "HappyFace.jpg" + "Bucket": "examplebucket", + "Key": "objectkey.jpg" + }, + "output": { }, "comments": { "input": { @@ -266,16 +268,14 @@ "output": { } }, - "description": "The following example deletes an object from a non-versioned bucket.", - "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", - "title": "To delete an object (from a non-versioned bucket)" + "description": "The following example deletes an object from an S3 bucket.", + "id": "to-delete-an-object-1472850136595", + "title": "To delete an object" }, { "input": { - "Bucket": "examplebucket", - "Key": "objectkey.jpg" - }, - "output": { + "Bucket": "ExampleBucket", + "Key": "HappyFace.jpg" }, "comments": { "input": { @@ -283,19 +283,20 @@ "output": { } }, - "description": "The following example deletes an object from an S3 bucket.", - "id": 
"to-delete-an-object-1472850136595", - "title": "To delete an object" + "description": "The following example deletes an object from a non-versioned bucket.", + "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", + "title": "To delete an object (from a non-versioned bucket)" } ], "DeleteObjectTagging": [ { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "HappyFace.jpg", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "output": { - "VersionId": "null" + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "comments": { "input": { @@ -303,18 +304,17 @@ "output": { } }, - "description": "The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the operation removes tag set from the latest object version.", - "id": "to-remove-tag-set-from-an-object-1483145342862", - "title": "To remove tag set from an object" + "description": "The following example removes tag set associated with the specified object version. The request specifies both the object key and object version.", + "id": "to-remove-tag-set-from-an-object-version-1483145285913", + "title": "To remove tag set from an object version" }, { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "Key": "HappyFace.jpg" }, "output": { - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "VersionId": "null" }, "comments": { "input": { @@ -322,9 +322,9 @@ "output": { } }, - "description": "The following example removes tag set associated with the specified object version. The request specifies both the object key and object version.", - "id": "to-remove-tag-set-from-an-object-version-1483145285913", - "title": "To remove tag set from an object version" + "description": "The following example removes tag set associated with the specified object. 
If the bucket is versioning enabled, the operation removes tag set from the latest object version.", + "id": "to-remove-tag-set-from-an-object-1483145342862", + "title": "To remove tag set from an object" } ], "DeleteObjects": [ @@ -728,18 +728,17 @@ { "input": { "Bucket": "examplebucket", - "Key": "SampleFile.txt", - "Range": "bytes=0-9" + "Key": "HappyFace.jpg" }, "output": { "AcceptRanges": "bytes", - "ContentLength": "10", - "ContentRange": "bytes 0-9/43", - "ContentType": "text/plain", - "ETag": "\"0d94420ffd0bc68cd3d152506b97a9cc\"", - "LastModified": "Thu, 09 Oct 2014 22:57:28 GMT", + "ContentLength": "3191", + "ContentType": "image/jpeg", + "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", + "LastModified": "Thu, 15 Dec 2016 01:19:41 GMT", "Metadata": { }, + "TagCount": 2, "VersionId": "null" }, "comments": { @@ -748,24 +747,25 @@ "output": { } }, - "description": "The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a specific byte range.", - "id": "to-retrieve-a-byte-range-of-an-object--1481832674603", - "title": "To retrieve a byte range of an object " + "description": "The following example retrieves an object for an S3 bucket.", + "id": "to-retrieve-an-object-1481827837012", + "title": "To retrieve an object" }, { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "SampleFile.txt", + "Range": "bytes=0-9" }, "output": { "AcceptRanges": "bytes", - "ContentLength": "3191", - "ContentType": "image/jpeg", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "LastModified": "Thu, 15 Dec 2016 01:19:41 GMT", + "ContentLength": "10", + "ContentRange": "bytes 0-9/43", + "ContentType": "text/plain", + "ETag": "\"0d94420ffd0bc68cd3d152506b97a9cc\"", + "LastModified": "Thu, 09 Oct 2014 22:57:28 GMT", "Metadata": { }, - "TagCount": 2, "VersionId": "null" }, "comments": { @@ -774,9 +774,9 @@ "output": { } }, - "description": "The following example retrieves an object for an S3 bucket.", - 
"id": "to-retrieve-an-object-1481827837012", - "title": "To retrieve an object" + "description": "The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a specific byte range.", + "id": "to-retrieve-a-byte-range-of-an-object--1481832674603", + "title": "To retrieve a byte range of an object " } ], "GetObjectAcl": [ @@ -989,37 +989,47 @@ "ListMultipartUploads": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "KeyMarker": "nextkeyfrompreviousresponse", + "MaxUploads": "2", + "UploadIdMarker": "valuefrompreviousresponse" }, "output": { + "Bucket": "acl1", + "IsTruncated": true, + "KeyMarker": "", + "MaxUploads": "2", + "NextKeyMarker": "someobjectkey", + "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", + "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", - "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "mohanataws", + "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": 
"STANDARD", - "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1029,53 +1039,43 @@ "output": { } }, - "description": "The following example lists in-progress multipart uploads on a specific bucket.", - "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", - "title": "To list in-progress multipart uploads on a bucket" + "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", + "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", + "title": "List next set of multipart uploads when previous result is truncated" }, { "input": { - "Bucket": "examplebucket", - "KeyMarker": "nextkeyfrompreviousresponse", - "MaxUploads": "2", - "UploadIdMarker": "valuefrompreviousresponse" + "Bucket": "examplebucket" }, "output": { - "Bucket": "acl1", - "IsTruncated": true, - "KeyMarker": "", - "MaxUploads": "2", - "NextKeyMarker": "someobjectkey", - "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", - "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "mohanataws", - "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "display-name", + "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": 
"examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1085,9 +1085,9 @@ "output": { } }, - "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", - "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", - "title": "List next set of multipart uploads when previous result is truncated" + "description": "The following example lists in-progress multipart uploads on a specific bucket.", + "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", + "title": "To list in-progress multipart uploads on a bucket" } ], "ListObjectVersions": [ @@ -1569,11 +1569,14 @@ "input": { "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "HappyFace.jpg", + "ServerSideEncryption": "AES256", + "StorageClass": "STANDARD_IA" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + "ServerSideEncryption": "AES256", + "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" }, "comments": { "input": { @@ -1581,23 +1584,20 @@ "output": { } }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. 
S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" + "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", + "id": "to-upload-an-object-(specify-optional-headers)", + "title": "To upload an object (specify optional headers)" }, { "input": { - "Body": "filetoupload", + "Body": "c:\\HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "Metadata": { - "metadata1": "value1", - "metadata2": "value2" - } + "Key": "HappyFace.jpg", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" + "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" }, "comments": { "input": { @@ -1605,22 +1605,22 @@ "output": { } }, - "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", - "title": "To upload object and specify user-defined metadata" + "description": "The following example uploads an object. The request specifies optional object tags. 
The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", + "title": "To upload an object and specify optional tags" }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", + "Key": "exampleobject", "ServerSideEncryption": "AES256", - "StorageClass": "STANDARD_IA" + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", "ServerSideEncryption": "AES256", - "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" }, "comments": { "input": { @@ -1628,9 +1628,9 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", - "id": "to-upload-an-object-(specify-optional-headers)", - "title": "To upload an object (specify optional headers)" + "description": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. 
If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", + "title": "To upload an object and specify server-side encryption and object tags" }, { "input": { @@ -1654,16 +1654,13 @@ }, { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" + "Key": "HappyFace.jpg" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" }, "comments": { "input": { @@ -1671,20 +1668,20 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", - "title": "To upload an object and specify server-side encryption and object tags" + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" }, { "input": { - "Body": "c:\\HappyFace.jpg", + "ACL": "authenticated-read", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "Tagging": "key1=value1&key2=value2" + "Key": "exampleobject" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" + "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" }, "comments": { "input": { @@ -1692,20 +1689,23 @@ "output": { } }, - "description": "The following example uploads an object. 
The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", - "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", - "title": "To upload an object and specify optional tags" + "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", + "title": "To upload an object and specify canned ACL." }, { "input": { - "ACL": "authenticated-read", "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "exampleobject" + "Key": "exampleobject", + "Metadata": { + "metadata1": "value1", + "metadata2": "value2" + } }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" + "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" }, "comments": { "input": { @@ -1713,9 +1713,9 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", - "title": "To upload an object and specify canned ACL." + "description": "The following example creates an object. The request also specifies optional metadata. 
If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", + "title": "To upload object and specify user-defined metadata" } ], "PutObjectAcl": [ diff --git a/models/apis/s3control/2018-08-20/endpoint-rule-set-1.json b/models/apis/s3control/2018-08-20/endpoint-rule-set-1.json index ac2587dad9a..8e4ec39fae7 100644 --- a/models/apis/s3control/2018-08-20/endpoint-rule-set-1.json +++ b/models/apis/s3control/2018-08-20/endpoint-rule-set-1.json @@ -61,148 +61,265 @@ }, "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { "ref": "Region" + }, + "snow" + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "S3 Snow does not support DualStack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "S3 Snow does not support FIPS", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "OutpostId" + } + ] + } + ], + "type": 
"tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "snow" + true ] }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "Partition does not support FIPS", + "type": "error" + }, + { + "conditions": [ { "fn": "isSet", "argv": [ { - "ref": "Endpoint" + "ref": "RequiresAccountId" } ] }, { - "fn": "parseURL", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] } - ], - "assign": "url" + ] } ], - "type": "tree", - "rules": [ + "error": "AccountId is required but not set", + "type": "error" + }, + { + "conditions": [ { - "conditions": [ + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ { - "fn": "aws.partition", + "fn": "isValidHostLabel", "argv": [ { - "ref": "Region" - } - ], - "assign": "partitionResult" + "ref": "AccountId" + }, + false + ] } - ], - "type": "tree", - "rules": [ + ] + } + ], + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" + }, + { + "conditions": [ + { + "fn": "not", + "argv": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "isValidHostLabel", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "S3 Snow does not support Dual-stack", - "type": "error" + "ref": "OutpostId" }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "S3 Snow does not support FIPS", - "type": 
"error" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } + false ] } ] - }, - { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" } - ] + ], + "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" }, { "conditions": [ { - "fn": "isSet", + "fn": "isValidHostLabel", "argv": [ { - "ref": "OutpostId" - } + "ref": "Region" + }, + true ] } ], @@ -211,318 +328,716 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid configuration: Outposts do not support dual-stack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } ], - "assign": "partitionResult" + "assign": "url" } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" + "ref": "UseFIPS" }, + true + ] + } + ], + "endpoint": { + "url": 
"https://s3-outposts-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "Invalid region: region was not a valid DNS name.", + "type": "error" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccessPointName" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "AccessPointName" + } + ], + "assign": "accessPointArn" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[0]" + ], + "assign": "arnType" + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "arnType" + }, + "" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "service" + ] + }, + "s3-outposts" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid configuration: Outpost Access Points do not support 
dual-stack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "outpostId" + }, + false + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseArnRegion" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseArnRegion" + }, + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ { - "ref": "AccountId" - } + "ref": "accessPointArn" + }, + "region" ] - } + }, + "{Region}" ] } + ] + } + ], + "error": "Invalid configuration: region from ARN `{accessPointArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", + "type": "error" + }, + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } ], - "error": "AccountId is required but not set", - "type": "error" - }, + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "aws.partition", + "argv": [ { - "fn": "isSet", + "fn": "getAttr", "argv": [ { - "ref": "AccountId" - } + "ref": "accessPointArn" + }, + "region" ] - }, + } + ], + "assign": "arnPartition" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "not", + "fn": "stringEquals", "argv": [ { - "fn": "isValidHostLabel", + "fn": "getAttr", + "argv": [ + { + "ref": "arnPartition" + }, + "name" + ] + }, + { + "fn": "getAttr", "argv": [ { - "ref": "AccountId" + "ref": "partitionResult" }, - false + "name" ] } ] } ], - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { "conditions": [ { - "fn": "not", + "fn": "isValidHostLabel", 
"argv": [ { - "fn": "isValidHostLabel", + "fn": "getAttr", "argv": [ { - "ref": "OutpostId" + "ref": "accessPointArn" }, - false + "region" ] - } + }, + true ] } ], - "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isValidHostLabel", + "fn": "not", "argv": [ { - "ref": "Region" - }, - true + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "accountId" + ] + }, + "" + ] + } ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "accountId" + ] + }, + false + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountId" + }, + "{accessPointArn#accountId}" + ] + } ] } ], - "error": "Invalid configuration: Outposts do not support dual-stack", + "error": "Invalid ARN: the accountId specified in the ARN (`{accessPointArn#accountId}`) does not match the parameter (`{AccountId}`)", "type": "error" }, { - "conditions": [], + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[2]" + ], + "assign": "outpostType" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", + "fn": "getAttr", "argv": [ { - "ref": "Endpoint" - } + "ref": "accessPointArn" + }, + "resourceId[3]" ], - "assign": "url" + "assign": "accessPointName" } ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ + "type": "tree", + "rules": [ + { + "conditions": [ { - "disableDoubleEncoding": true, - "name": 
"sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "fn": "stringEquals", + "argv": [ + { + "ref": "outpostType" + }, + "accesspoint" + ] + } + ], + "type": "tree", + "rules": [ { - "ref": "UseFIPS" + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{accessPointArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + 
"signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" } ] }, - "headers": {} - }, - "type": "endpoint" + { + "conditions": [], + "error": "Expected an outpost type `accesspoint`, found `{outpostType}`", + "type": "error" + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid ARN: expected an access point name", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: Expected a 4-component resource", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `{accessPointArn#accountId}`", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid region: region was not a valid DNS name.", + "error": "Invalid ARN: missing account ID", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid region in ARN: `{accessPointArn#region}` (invalid DNS name)", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", + "type": "error" } ] } ] } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "type": "error" } ] }, { "conditions": [], - "error": "A valid partition could not be determined", + "error": "Invalid ARN: The Outpost Id was not set", "type": "error" } ] + } + ] + }, + { + "conditions": [], + "error": "Invalid ARN: No ARN type specified", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + 
"ref": "Bucket" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "Bucket" + } + ], + "assign": "bucketArn" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "resourceId[0]" + ], + "assign": "arnType" }, { - "conditions": [ + "fn": "not", + "argv": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { - "ref": "AccessPointName" - } + "ref": "arnType" + }, + "" ] - }, + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "aws.parseArn", + "fn": "stringEquals", "argv": [ { - "ref": "AccessPointName" - } - ], - "assign": "accessPointArn" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "service" + ] + }, + "s3-outposts" + ] } ], "type": "tree", @@ -530,28 +1045,29 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseDualStack" }, - "resourceId[0]" - ], - "assign": "arnType" - }, + true + ] + } + ], + "error": "Invalid configuration: Outpost buckets do not support dual-stack", + "type": "error" + }, + { + "conditions": [ { - "fn": "not", + "fn": "getAttr", "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] - } - ] + "ref": "bucketArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" } ], "type": "tree", @@ -559,18 +1075,12 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "isValidHostLabel", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "service" - ] + "ref": "outpostId" }, - "s3-outposts" + false ] } ], @@ -578,40 +1088,107 @@ "rules": [ { "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseArnRegion" + } + ] + }, { "fn": "booleanEquals", "argv": [ { - "ref": "UseDualStack" + "ref": "UseArnRegion" }, - true + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] + }, + 
"{Region}" + ] + } ] } ], - "error": "Invalid configuration: Outpost Access Points do not support dual-stack", + "error": "Invalid configuration: region from ARN `{bucketArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", "type": "error" }, { - "conditions": [], + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] + } + ], + "assign": "arnPartition" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "getAttr", + "fn": "aws.partition", "argv": [ { - "ref": "accessPointArn" - }, - "resourceId[1]" + "ref": "Region" + } ], - "assign": "outpostId" + "assign": "partitionResult" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "arnPartition" + }, + "name" + ] + }, + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -620,522 +1197,439 @@ "fn": "isValidHostLabel", "argv": [ { - "ref": "outpostId" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] }, - false + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "not", + "argv": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { - "ref": "UseArnRegion" - } + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "accountId" + ] + }, + "" ] - }, + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "booleanEquals", + "fn": "isValidHostLabel", "argv": [ { - "ref": "UseArnRegion" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "accountId" + ] }, false ] - }, + } + ], + "type": "tree", + "rules": [ { - "fn": "not", - "argv": [ + "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", + "ref": "AccountId" + } + ] + }, + 
{ + "fn": "not", + "argv": [ + { + "fn": "stringEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "AccountId" }, - "region" + "{bucketArn#accountId}" ] - }, - "{Region}" + } ] } - ] - } - ], - "error": "Invalid configuration: region from ARN `{accessPointArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ + ], + "error": "Invalid ARN: the accountId specified in the ARN (`{bucketArn#accountId}`) does not match the parameter (`{AccountId}`)", + "type": "error" + }, { "conditions": [ { - "fn": "aws.partition", + "fn": "getAttr", "argv": [ { - "ref": "Region" - } + "ref": "bucketArn" + }, + "resourceId[2]" ], - "assign": "partitionResult" + "assign": "outpostType" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "resourceId[3]" + ], + "assign": "bucketName" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "fn": "getAttr", + "ref": "outpostType" + }, + "bucket" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseFIPS" }, - "region" + true ] } ], - "assign": "arnPartition" - } - ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, { - "conditions": [ + "fn": "parseURL", + 
"argv": [ { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arnPartition" - }, - "name" - ] - }, - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - } - ] + "ref": "Endpoint" } ], - "type": "tree", - "rules": [ + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "region" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "accountId" - ] - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "AccountId" - }, - "{accessPointArn#accountId}" - ] - } - ] - } - ], - "error": "Invalid ARN: the accountId specified in the ARN (`{accessPointArn#accountId}`) does not match the parameter (`{AccountId}`)", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "resourceId[2]" - ], - "assign": "outpostType" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - 
"conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "resourceId[3]" - ], - "assign": "accessPointName" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "outpostType" - }, - "accesspoint" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{accessPointArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - 
"{outpostId}" - ] - } - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Expected an outpost type `accesspoint`, found `{outpostType}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: expected an access point name", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected a 4-component resource", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `{accessPointArn#accountId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: missing account ID", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid region in ARN: `{accessPointArn#region}` (invalid DNS name)", - "type": "error" - } - ] + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" } ] }, - { - "conditions": [], - "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", - "type": "error" + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] } - ] + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" } ] }, { "conditions": [], - "error": "Could not load partition for ARN region `{accessPointArn#region}`", + "error": "Invalid ARN: Expected an outpost type `bucket`, found `{outpostType}`", "type": "error" } ] + }, + { + "conditions": [], + 
"error": "Invalid ARN: expected a bucket name", + "type": "error" } ] }, { "conditions": [], - "error": "A valid partition could not be determined", + "error": "Invalid ARN: Expected a 4-component resource", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `{bucketArn#accountId}`", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: missing account ID", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "error": "Invalid region in ARN: `{bucketArn#region}` (invalid DNS name)", "type": "error" } ] + }, + { + "conditions": [], + "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", + "type": "error" } ] - }, - { - "conditions": [], - "error": "Invalid ARN: The Outpost Id was not set", - "type": "error" } ] } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid ARN: No ARN type specified", + "error": "Invalid ARN: The Outpost Id was not set", "type": "error" } ] + } + ] + }, + { + "conditions": [], + "error": "Invalid ARN: No ARN type specified", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "Region" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "Partition does 
not support FIPS", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ] + } + ], + "error": "AccountId is required but not set", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "AccountId" + }, + false + ] + } + ] + } + ], + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" }, { "conditions": [ @@ -1143,18 +1637,18 @@ "fn": "isSet", "argv": [ { - "ref": "Bucket" + "ref": "Endpoint" } ] }, { - "fn": "aws.parseArn", + "fn": "parseURL", "argv": [ { - "ref": "Bucket" + "ref": "Endpoint" } ], - "assign": "bucketArn" + "assign": "url" } ], "type": "tree", @@ -1162,1304 +1656,493 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "bucketArn" + "ref": "UseDualStack" }, - "resourceId[0]" - ], - "assign": "arnType" + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] }, { - "fn": "not", + "fn": "booleanEquals", "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [ + "endpoint": { + "url": "{url#scheme}://{AccountId}.{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "service" - ] - }, - "s3-outposts" - ] + 
"disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid configuration: Outpost buckets do not support dual-stack", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[1]" - ], - "assign": "outpostId" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "outpostId" - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "UseArnRegion" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseArnRegion" - }, - false - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - }, - "{Region}" - ] - } - ] - } - ], - "error": "Invalid configuration: region from ARN `{bucketArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - } - ], - "assign": "arnPartition" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - 
{ - "fn": "getAttr", - "argv": [ - { - "ref": "arnPartition" - }, - "name" - ] - }, - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "accountId" - ] - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "AccountId" - }, - "{bucketArn#accountId}" - ] - } - ] - } - ], - "error": "Invalid ARN: the accountId specified in the ARN (`{bucketArn#accountId}`) does not match the parameter (`{AccountId}`)", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[2]" - ], - "assign": "outpostType" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[3]" - ], - "assign": "bucketName" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { 
- "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "outpostType" - }, - "bucket" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected an outpost type `bucket`, found `{outpostType}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: expected a bucket name", 
- "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected a 4-component resource", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `{bucketArn#accountId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: missing account ID", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid region in ARN: `{bucketArn#region}` (invalid DNS name)", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Could not load partition for ARN region `{bucketArn#region}`", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The Outpost Id was not set", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: No ARN type specified", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "Region" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true 
- ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ] - } - ], - "error": "AccountId is required but not set", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "AccountId" - }, - false - ] - } - ] - } - ], - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "{url#scheme}://{AccountId}.{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - 
"disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - }, 
- { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://s3-control-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - 
true - ] - } - ], - "endpoint": { - "url": "https://s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://s3-control.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - }, + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "Invalid region: region was not a valid DNS name.", - "type": "error" + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": 
"{Region}" } ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" } ] }, { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] } - ] + ], + "endpoint": { + "url": "https://{AccountId}.s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + 
"authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://s3-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + 
"argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://s3-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid region: region was not a valid DNS name.", + "type": "error" } ] - }, - { - "conditions": [], - "error": "Region must be set", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Region must be set", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/s3control/2018-08-20/endpoint-tests-1.json b/models/apis/s3control/2018-08-20/endpoint-tests-1.json index afb8c275442..6d2656e10b9 100644 --- a/models/apis/s3control/2018-08-20/endpoint-tests-1.json +++ b/models/apis/s3control/2018-08-20/endpoint-tests-1.json @@ -771,7 +771,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -811,7 +810,6 @@ ], "params": { "Bucket": "blah", - 
"Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -849,7 +847,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "Region": "us-east-2", "RequiresAccountId": false, "UseDualStack": false, @@ -880,14 +877,13 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", - "Operation": "ListRegionalBuckets", + "AccountId": "123456789012", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -909,7 +905,7 @@ } ] }, - "url": "https://123.s3-control.us-east-2.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-2.amazonaws.com" } }, "operationInputs": [ @@ -919,13 +915,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "123", - "Operation": "ListRegionalBuckets", + "AccountId": "123456789012", "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": false, @@ -957,14 +952,13 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", - "Operation": "CreateBucket", + "AccountId": "123456789012", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1131,7 +1125,7 @@ { "documentation": "Account ID set inline and in ARN and they do not match@us-west-2", "expect": { - "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`9999999`)" + "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`999999999999`)" }, "operationInputs": [ { @@ -1141,14 +1135,14 @@ }, "operationName": "GetAccessPoint", "operationParams": { - "AccountId": "9999999", + "AccountId": "999999999999", "Name": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" } } ], "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "9999999", + "AccountId": "999999999999", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": false, @@ -1190,7 +1184,6 @@ "AccessPointName": "apname", "AccountId": "123456789012", "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1239,7 +1232,6 @@ "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1254,7 +1246,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1269,7 +1260,6 @@ "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1296,7 +1286,6 @@ "params": { "Bucket": "bucketname", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-west-2", "RequiresAccountId": false, @@ -1337,14 +1326,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", 
+ "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1376,15 +1365,14 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", + "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1418,15 +1406,14 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", + "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1468,7 +1455,6 @@ "params": { "Bucket": "blah", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -1484,7 +1470,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -1499,7 +1484,6 @@ "params": { "Bucket": "bucketname", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-west-2", "RequiresAccountId": false, @@ -1540,7 +1524,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } 
], @@ -1584,7 +1569,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1628,7 +1614,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1673,7 +1660,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1718,7 +1706,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1776,7 +1765,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -1820,7 +1810,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1864,7 +1855,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": 
"arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1909,7 +1901,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1954,7 +1947,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2012,7 +2006,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -2056,7 +2051,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2100,7 +2096,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2145,7 +2142,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": 
"arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2190,7 +2188,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2356,11 +2355,11 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2501,7 +2500,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2511,12 +2510,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2561,7 +2560,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2572,12 +2571,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2598,7 +2597,7 @@ } ] }, - "url": 
"https://1234567890-aBC.s3-control-fips.dualstack.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.dualstack.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2610,12 +2609,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": true, @@ -2636,7 +2635,7 @@ } ] }, - "url": "https://1234567890-aBC.example.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ @@ -2647,12 +2646,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "Endpoint": "https://example.com" @@ -2704,7 +2703,7 @@ } }, { - "documentation": "account id with custom endpoint, fips and dualstack", + "documentation": "account id with custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -2717,7 +2716,7 @@ } ] }, - "url": "https://1234567890-aBC.example.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ @@ -2729,21 +2728,20 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": true + "UseFIPS": true } }, { - "documentation": "custom endpoint, fips and dualstack", + "documentation": "custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -2762,8 +2760,7 @@ "params": { "Region": "us-east-1", "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": true + "UseFIPS": true } }, { @@ -2786,32 
+2783,19 @@ "params": { "Region": "us-east-1", "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": false + "UseFIPS": true } }, { - "documentation": "custom endpoint, dualstack", + "documentation": "custom endpoint, DualStack", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://example.com" - } + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "params": { "Region": "us-east-1", "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualstack": true + "UseDualStack": true } }, { @@ -2835,7 +2819,6 @@ "error": "AccountId is required but not set" }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -2862,7 +2845,6 @@ ], "params": { "AccountId": "/?invalid¬-host*label", - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -2943,7 +2925,6 @@ "AccessPointName": "apname", "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -2978,7 +2959,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -2986,9 +2966,9 @@ } }, { - "documentation": "Dualstack + Custom endpoint is not supported(non-arn)", + "documentation": "DualStack + Custom endpoint is not supported(non-arn)", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "operationInputs": [ { @@ 
-3008,7 +2988,6 @@ "AccessPointName": "apname", "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3029,14 +3008,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3063,7 +3042,6 @@ ], "params": { "AccountId": "0123456789012", - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "cn-north-1", "RequiresAccountId": true, @@ -3090,7 +3068,6 @@ ], "params": { "AccountId": "0123456789012", - "Operation": "ListRegionalBuckets", "OutpostId": "?outpost/invalid+", "Region": "us-west-1", "RequiresAccountId": true, @@ -3118,7 +3095,6 @@ "error": "Invalid region: region was not a valid DNS name." 
}, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "invalid-region 42", "AccountId": "0123456", @@ -3145,7 +3121,6 @@ } }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-west-2", "UseDualStack": false, @@ -3205,14 +3180,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": false, @@ -3308,7 +3283,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -3333,13 +3309,13 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": true, @@ -3568,22 +3544,20 @@ "Bucket": "bucketName", "Endpoint": "https://10.0.1.12:433", "UseFIPS": true, - "UseDualStack": false, - "Accelerate": false + "UseDualStack": false } }, { - "documentation": "S3 Snow Control with Dual-stack enabled", + "documentation": "S3 Snow Control with Dualstack 
enabled", "expect": { - "error": "S3 Snow does not support Dual-stack" + "error": "S3 Snow does not support DualStack" }, "params": { "Region": "snow", "Bucket": "bucketName", "Endpoint": "https://10.0.1.12:433", "UseFIPS": false, - "UseDualStack": true, - "Accelerate": false + "UseDualStack": true } } ], diff --git a/models/apis/verifiedpermissions/2021-12-01/docs-2.json b/models/apis/verifiedpermissions/2021-12-01/docs-2.json index 409caabdb52..28a511f99e6 100644 --- a/models/apis/verifiedpermissions/2021-12-01/docs-2.json +++ b/models/apis/verifiedpermissions/2021-12-01/docs-2.json @@ -2,10 +2,10 @@ "version": "2.0", "service": "

Amazon Verified Permissions is a permissions management service from Amazon Web Services. You can use Verified Permissions to manage permissions for your application, and authorize user access based on those permissions. Using Verified Permissions, application developers can grant access based on information about the users, resources, and requested actions. You can also evaluate additional information like group membership, attributes of the resources, and session context, such as time of request and IP addresses. Verified Permissions manages these permissions by letting you create and store authorization policies for your applications, such as consumer-facing web sites and enterprise business systems.

Verified Permissions uses Cedar as the policy language to express your permission requirements. Cedar supports both role-based access control (RBAC) and attribute-based access control (ABAC) authorization models.

For more information about configuring, administering, and using Amazon Verified Permissions in your applications, see the Amazon Verified Permissions User Guide.

For more information about the Cedar policy language, see the Cedar Policy Language Guide.

When you write Cedar policies that reference principals, resources and actions, you can define the unique identifiers used for each of those elements. We strongly recommend that you follow these best practices:

  • Use values like universally unique identifiers (UUIDs) for all principal and resource identifiers.

    For example, if user jane leaves the company, and you later let someone else use the name jane, then that new user automatically gets access to everything granted by policies that still reference User::\"jane\". Cedar can’t distinguish between the new user and the old. This applies to both principal and resource identifiers. Always use identifiers that are guaranteed unique and never reused to ensure that you don’t unintentionally grant access because of the presence of an old identifier in a policy.

    Where you use a UUID for an entity, we recommend that you follow it with the // comment specifier and the ‘friendly’ name of your entity. This helps to make your policies easier to understand. For example: principal == User::\"a1b2c3d4-e5f6-a1b2-c3d4-EXAMPLE11111\", // alice

  • Do not include personally identifying, confidential, or sensitive information as part of the unique identifier for your principals or resources. These identifiers are included in log entries shared in CloudTrail trails.

Several operations return structures that appear similar, but have different purposes. As new functionality is added to the product, the structure used in a parameter of one operation might need to change in a way that wouldn't make sense for the same parameter in a different operation. To help you understand the purpose of each, the following naming convention is used for the structures:

", "operations": { - "CreateIdentitySource": "

Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

To reference a user from this identity source in your Cedar policies, use the following syntax.

IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

", - "CreatePolicy": "

Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

", - "CreatePolicyStore": "

Creates a policy store. A policy store is a container for policy resources.

Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

", - "CreatePolicyTemplate": "

Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

", + "CreateIdentitySource": "

Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

To reference a user from this identity source in your Cedar policies, use the following syntax.

IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>\"

Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "CreatePolicy": "

Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "CreatePolicyStore": "

Creates a policy store. A policy store is a container for policy resources.

Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "CreatePolicyTemplate": "

Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", "DeleteIdentitySource": "

Deletes an identity source that references an identity provider (IdP) such as Amazon Cognito. After you delete the identity source, you can no longer use tokens for identities from that identity source to represent principals in authorization queries made using IsAuthorizedWithToken operations.

", "DeletePolicy": "

Deletes the specified policy from the policy store.

This operation is idempotent; if you specify a policy that doesn't exist, the request response returns a successful HTTP 200 status code.

", "DeletePolicyStore": "

Deletes the specified policy store.

This operation is idempotent. If you specify a policy store that does not exist, the request response will still return a successful HTTP 200 status code.

", @@ -21,11 +21,11 @@ "ListPolicies": "

Returns a paginated list of all policies stored in the specified policy store.

", "ListPolicyStores": "

Returns a paginated list of all policy stores in the calling Amazon Web Services account.

", "ListPolicyTemplates": "

Returns a paginated list of all policy templates in the specified policy store.

", - "PutSchema": "

Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

", - "UpdateIdentitySource": "

Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

", - "UpdatePolicy": "

Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

  • If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

  • When you edit a static policy, You can change only certain elements of a static policy:

    • The action referenced by the policy.

    • A condition clause, such as when and unless.

    You can't change these elements of a static policy:

    • Changing a policy from a static policy to a template-linked policy.

    • Changing the effect of a static policy from permit or forbid.

    • The principal referenced by a static policy.

    • The resource referenced by a static policy.

  • To update a template-linked policy, you must update the template instead.

", - "UpdatePolicyStore": "

Modifies the validation setting for a policy store.

", - "UpdatePolicyTemplate": "

Updates the specified policy template. You can update only the description and the some elements of the policyBody.

Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

" + "PutSchema": "

Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "UpdateIdentitySource": "

Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "UpdatePolicy": "

Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

  • If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

  • When you edit a static policy, you can change only certain elements of a static policy:

    • The action referenced by the policy.

    • A condition clause, such as when and unless.

    You can't change these elements of a static policy:

    • Changing a policy from a static policy to a template-linked policy.

    • Changing the effect of a static policy from permit to forbid, or vice versa.

    • The principal referenced by a static policy.

    • The resource referenced by a static policy.

  • To update a template-linked policy, you must update the template instead.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "UpdatePolicyStore": "

Modifies the validation setting for a policy store.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "UpdatePolicyTemplate": "

Updates the specified policy template. You can update only the description and some elements of the policyBody.

Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

" }, "shapes": { "AccessDeniedException": { @@ -271,7 +271,7 @@ } }, "EntityItem": { - "base": "

Contains information about an entity that can be referenced in a Cedar policy.

This data type is used as one of the fields in the EntitiesDefinition structure.

{ \"id\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"Attributes\": {}, \"Parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

", + "base": "

Contains information about an entity that can be referenced in a Cedar policy.

This data type is used as one of the fields in the EntitiesDefinition structure.

{ \"identifier\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"attributes\": {}, \"parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

", "refs": { "EntityList$member": null } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index a0baba706ae..eae2e98038e 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -2037,6 +2037,12 @@ "deprecated" : true, "hostname" : "athena-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "athena.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "athena.me-central-1.api.aws", @@ -4404,6 +4410,7 @@ "deprecated" : true, "hostname" : "datasync-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, diff --git a/service/ec2/api.go b/service/ec2/api.go index d89c38a1bc6..7e55ef17258 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -187247,6 +187247,54 @@ const ( // InstanceTypeM7iFlex8xlarge is a InstanceType enum value InstanceTypeM7iFlex8xlarge = "m7i-flex.8xlarge" + + // InstanceTypeM7aMedium is a InstanceType enum value + InstanceTypeM7aMedium = "m7a.medium" + + // InstanceTypeM7aLarge is a InstanceType enum value + InstanceTypeM7aLarge = "m7a.large" + + // InstanceTypeM7aXlarge is a InstanceType enum value + InstanceTypeM7aXlarge = "m7a.xlarge" + + // InstanceTypeM7a2xlarge is a InstanceType enum value + InstanceTypeM7a2xlarge = "m7a.2xlarge" + + // InstanceTypeM7a4xlarge is a InstanceType enum value + InstanceTypeM7a4xlarge = "m7a.4xlarge" + + // InstanceTypeM7a8xlarge is a InstanceType enum value + InstanceTypeM7a8xlarge = "m7a.8xlarge" + + // InstanceTypeM7a12xlarge is a InstanceType enum value + InstanceTypeM7a12xlarge = "m7a.12xlarge" + + // InstanceTypeM7a16xlarge is a InstanceType enum value + InstanceTypeM7a16xlarge = "m7a.16xlarge" + + // InstanceTypeM7a24xlarge is a InstanceType enum value + InstanceTypeM7a24xlarge = "m7a.24xlarge" + + // InstanceTypeM7a32xlarge is a InstanceType enum value + InstanceTypeM7a32xlarge = 
"m7a.32xlarge" + + // InstanceTypeM7a48xlarge is a InstanceType enum value + InstanceTypeM7a48xlarge = "m7a.48xlarge" + + // InstanceTypeM7aMetal48xl is a InstanceType enum value + InstanceTypeM7aMetal48xl = "m7a.metal-48xl" + + // InstanceTypeHpc7a12xlarge is a InstanceType enum value + InstanceTypeHpc7a12xlarge = "hpc7a.12xlarge" + + // InstanceTypeHpc7a24xlarge is a InstanceType enum value + InstanceTypeHpc7a24xlarge = "hpc7a.24xlarge" + + // InstanceTypeHpc7a48xlarge is a InstanceType enum value + InstanceTypeHpc7a48xlarge = "hpc7a.48xlarge" + + // InstanceTypeHpc7a96xlarge is a InstanceType enum value + InstanceTypeHpc7a96xlarge = "hpc7a.96xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -187932,6 +187980,22 @@ func InstanceType_Values() []string { InstanceTypeM7iFlex2xlarge, InstanceTypeM7iFlex4xlarge, InstanceTypeM7iFlex8xlarge, + InstanceTypeM7aMedium, + InstanceTypeM7aLarge, + InstanceTypeM7aXlarge, + InstanceTypeM7a2xlarge, + InstanceTypeM7a4xlarge, + InstanceTypeM7a8xlarge, + InstanceTypeM7a12xlarge, + InstanceTypeM7a16xlarge, + InstanceTypeM7a24xlarge, + InstanceTypeM7a32xlarge, + InstanceTypeM7a48xlarge, + InstanceTypeM7aMetal48xl, + InstanceTypeHpc7a12xlarge, + InstanceTypeHpc7a24xlarge, + InstanceTypeHpc7a48xlarge, + InstanceTypeHpc7a96xlarge, } } diff --git a/service/glue/api.go b/service/glue/api.go index dcc5b0a781d..14c4f77ce25 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -63950,12 +63950,18 @@ type Session struct { // The command object.See SessionCommand. Command *SessionCommand `type:"structure"` + // The date and time that this session is completed. + CompletedOn *time.Time `type:"timestamp"` + // The number of connections used for the session. Connections *ConnectionsList `type:"structure"` // The time and date when the session was created. CreatedOn *time.Time `type:"timestamp"` + // The DPUs consumed by the session (formula: ExecutionTime * MaxCapacity). 
+ DPUSeconds *float64 `type:"double"` + // A map array of key-value pairs. Max is 75 pairs. DefaultArguments map[string]*string `type:"map"` @@ -63965,6 +63971,9 @@ type Session struct { // The error message displayed during the session. ErrorMessage *string `type:"string"` + // The total time the session ran for. + ExecutionTime *float64 `type:"double"` + // The Glue version determines the versions of Apache Spark and Python that // Glue supports. The GlueVersion must be greater than 2.0. GlueVersion *string `min:"1" type:"string"` @@ -63972,11 +63981,17 @@ type Session struct { // The ID of the session. Id *string `min:"1" type:"string"` + // The number of minutes when idle before the session times out. + IdleTimeout *int64 `type:"integer"` + // The number of Glue data processing units (DPUs) that can be allocated when // the job runs. A DPU is a relative measure of processing power that consists // of 4 vCPUs of compute capacity and 16 GB memory. MaxCapacity *float64 `type:"double"` + // The number of workers of a defined WorkerType to use for the session. + NumberOfWorkers *int64 `type:"integer"` + // The code execution progress of the session. Progress *float64 `type:"double"` @@ -63989,6 +64004,11 @@ type Session struct { // The session status. Status *string `type:"string" enum:"SessionStatus"` + + // The type of predefined worker that is allocated when a session runs. Accepts + // a value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value + // Z.2X for Ray sessions. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation. @@ -64015,6 +64035,12 @@ func (s *Session) SetCommand(v *SessionCommand) *Session { return s } +// SetCompletedOn sets the CompletedOn field's value. +func (s *Session) SetCompletedOn(v time.Time) *Session { + s.CompletedOn = &v + return s +} + // SetConnections sets the Connections field's value. 
func (s *Session) SetConnections(v *ConnectionsList) *Session { s.Connections = v @@ -64027,6 +64053,12 @@ func (s *Session) SetCreatedOn(v time.Time) *Session { return s } +// SetDPUSeconds sets the DPUSeconds field's value. +func (s *Session) SetDPUSeconds(v float64) *Session { + s.DPUSeconds = &v + return s +} + // SetDefaultArguments sets the DefaultArguments field's value. func (s *Session) SetDefaultArguments(v map[string]*string) *Session { s.DefaultArguments = v @@ -64045,6 +64077,12 @@ func (s *Session) SetErrorMessage(v string) *Session { return s } +// SetExecutionTime sets the ExecutionTime field's value. +func (s *Session) SetExecutionTime(v float64) *Session { + s.ExecutionTime = &v + return s +} + // SetGlueVersion sets the GlueVersion field's value. func (s *Session) SetGlueVersion(v string) *Session { s.GlueVersion = &v @@ -64057,12 +64095,24 @@ func (s *Session) SetId(v string) *Session { return s } +// SetIdleTimeout sets the IdleTimeout field's value. +func (s *Session) SetIdleTimeout(v int64) *Session { + s.IdleTimeout = &v + return s +} + // SetMaxCapacity sets the MaxCapacity field's value. func (s *Session) SetMaxCapacity(v float64) *Session { s.MaxCapacity = &v return s } +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *Session) SetNumberOfWorkers(v int64) *Session { + s.NumberOfWorkers = &v + return s +} + // SetProgress sets the Progress field's value. func (s *Session) SetProgress(v float64) *Session { s.Progress = &v @@ -64087,6 +64137,12 @@ func (s *Session) SetStatus(v string) *Session { return s } +// SetWorkerType sets the WorkerType field's value. +func (s *Session) SetWorkerType(v string) *Session { + s.WorkerType = &v + return s +} + // The SessionCommand that runs the job. 
type SessionCommand struct { _ struct{} `type:"structure"` diff --git a/service/mediaconvert/api.go b/service/mediaconvert/api.go index c0e41b5728a..f1be32c84cd 100644 --- a/service/mediaconvert/api.go +++ b/service/mediaconvert/api.go @@ -1184,7 +1184,7 @@ func (c *MediaConvert) GetJobRequest(input *GetJobInput) (req *request.Request, // GetJob API operation for AWS Elemental MediaConvert. // -// Retrieve the JSON for a specific completed transcoding job. +// Retrieve the JSON for a specific transcoding job. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3572,6 +3572,10 @@ type AudioCodecSettings struct { // Required when you set Codec to the value EAC3. Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"` + // Required when you set Codec, under AudioDescriptions>CodecSettings, to the + // value FLAC. + FlacSettings *FlacSettings `locationName:"flacSettings" type:"structure"` + // Required when you set Codec to the value MP2. Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"` @@ -3637,6 +3641,11 @@ func (s *AudioCodecSettings) Validate() error { invalidParams.AddNested("Eac3Settings", err.(request.ErrInvalidParams)) } } + if s.FlacSettings != nil { + if err := s.FlacSettings.Validate(); err != nil { + invalidParams.AddNested("FlacSettings", err.(request.ErrInvalidParams)) + } + } if s.Mp2Settings != nil { if err := s.Mp2Settings.Validate(); err != nil { invalidParams.AddNested("Mp2Settings", err.(request.ErrInvalidParams)) @@ -3705,6 +3714,12 @@ func (s *AudioCodecSettings) SetEac3Settings(v *Eac3Settings) *AudioCodecSetting return s } +// SetFlacSettings sets the FlacSettings field's value. +func (s *AudioCodecSettings) SetFlacSettings(v *FlacSettings) *AudioCodecSettings { + s.FlacSettings = v + return s +} + // SetMp2Settings sets the Mp2Settings field's value. 
func (s *AudioCodecSettings) SetMp2Settings(v *Mp2Settings) *AudioCodecSettings { s.Mp2Settings = v @@ -4440,11 +4455,12 @@ func (s *AutomatedAbrRule) SetType(v string) *AutomatedAbrRule { type AutomatedAbrSettings struct { _ struct{} `type:"structure"` - // Optional. The maximum target bit rate used in your automated ABR stack. Use - // this value to set an upper limit on the bandwidth consumed by the highest-quality - // rendition. This is the rendition that is delivered to viewers with the fastest - // internet connections. If you don't specify a value, MediaConvert uses 8,000,000 - // (8 mb/s) by default. + // Specify the maximum average bitrate for MediaConvert to use in your automated + // ABR stack. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) + // by default. The average bitrate of your highest-quality rendition will be + // equal to or below this value, depending on the quality, complexity, and resolution + // of your content. Note that the instantaneous maximum bitrate may vary above + // the value that you specify. MaxAbrBitrate *int64 `locationName:"maxAbrBitrate" min:"100000" type:"integer"` // Optional. The maximum number of renditions that MediaConvert will create @@ -4454,10 +4470,11 @@ type AutomatedAbrSettings struct { // your JSON job specification, MediaConvert defaults to a limit of 15. MaxRenditions *int64 `locationName:"maxRenditions" min:"3" type:"integer"` - // Optional. The minimum target bitrate used in your automated ABR stack. Use - // this value to set a lower limit on the bitrate of video delivered to viewers - // with slow internet connections. If you don't specify a value, MediaConvert - // uses 600,000 (600 kb/s) by default. + // Specify the minimum average bitrate for MediaConvert to use in your automated + // ABR stack. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) + // by default. The average bitrate of your lowest-quality rendition will be + // near this value. 
Note that the instantaneous minimum bitrate may vary below + // the value that you specify. MinAbrBitrate *int64 `locationName:"minAbrBitrate" min:"100000" type:"integer"` // Optional. Use Automated ABR rules to specify restrictions for the rendition @@ -4672,6 +4689,14 @@ type Av1Settings struct { // Specify the Bit depth. You can choose 8-bit or 10-bit. BitDepth *string `locationName:"bitDepth" type:"string" enum:"Av1BitDepth"` + // Film grain synthesis replaces film grain present in your content with similar + // quality synthesized AV1 film grain. We recommend that you choose Enabled + // to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. + // For QVBR quality level 9 or 10 outputs we recommend that you keep the default + // value, Disabled. When you include Film grain synthesis, you cannot include + // the Noise reducer preprocessor. + FilmGrainSynthesis *string `locationName:"filmGrainSynthesis" type:"string" enum:"Av1FilmGrainSynthesis"` + // Use the Framerate setting to specify the frame rate for this output. If you // want to keep the same frame rate as the input video, choose Follow source. // If you want to do frame rate conversion, choose a frame rate from the dropdown @@ -4812,6 +4837,12 @@ func (s *Av1Settings) SetBitDepth(v string) *Av1Settings { return s } +// SetFilmGrainSynthesis sets the FilmGrainSynthesis field's value. +func (s *Av1Settings) SetFilmGrainSynthesis(v string) *Av1Settings { + s.FilmGrainSynthesis = &v + return s +} + // SetFramerateControl sets the FramerateControl field's value. func (s *Av1Settings) SetFramerateControl(v string) *Av1Settings { s.FramerateControl = &v @@ -11480,6 +11511,79 @@ func (s *FileSourceSettings) SetTimeDeltaUnits(v string) *FileSourceSettings { return s } +// Required when you set Codec, under AudioDescriptions>CodecSettings, to the +// value FLAC. 
+type FlacSettings struct { + _ struct{} `type:"structure"` + + // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding + // quality for this audio track. + BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` + + // Specify the number of channels in this output audio track. Choosing Mono + // on the console gives you 1 output channel; choosing Stereo gives you 2. In + // the API, valid values are between 1 and 8. + Channels *int64 `locationName:"channels" min:"1" type:"integer"` + + // Sample rate in hz. + SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FlacSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FlacSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FlacSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FlacSettings"} + if s.BitDepth != nil && *s.BitDepth < 16 { + invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16)) + } + if s.Channels != nil && *s.Channels < 1 { + invalidParams.Add(request.NewErrParamMinValue("Channels", 1)) + } + if s.SampleRate != nil && *s.SampleRate < 22050 { + invalidParams.Add(request.NewErrParamMinValue("SampleRate", 22050)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBitDepth sets the BitDepth field's value. 
+func (s *FlacSettings) SetBitDepth(v int64) *FlacSettings { + s.BitDepth = &v + return s +} + +// SetChannels sets the Channels field's value. +func (s *FlacSettings) SetChannels(v int64) *FlacSettings { + s.Channels = &v + return s +} + +// SetSampleRate sets the SampleRate field's value. +func (s *FlacSettings) SetSampleRate(v int64) *FlacSettings { + s.SampleRate = &v + return s +} + type ForbiddenException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -18107,6 +18211,20 @@ type M2tsSettings struct { // data. ProgramNumber *int64 `locationName:"programNumber" type:"integer"` + // Manually specify the initial PTS offset, in seconds, when you set PTS offset + // to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default + // value 2. + PtsOffset *int64 `locationName:"ptsOffset" type:"integer"` + + // Specify the initial presentation timestamp (PTS) offset for your transport + // stream output. To let MediaConvert automatically determine the initial PTS + // offset: Keep the default value, Auto. We recommend that you choose Auto for + // the widest player compatibility. The initial PTS will be at least two seconds + // and vary depending on your output's bitrate, HRD buffer size and HRD buffer + // initial fill percentage. To manually specify an initial PTS offset: Choose + // Seconds. Then specify the number of seconds with PTS offset. + PtsOffsetMode *string `locationName:"ptsOffsetMode" type:"string" enum:"TsPtsOffset"` + // When set to CBR, inserts null packets into transport stream to fill specified // bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate, // but the output will not be padded up to that bitrate. @@ -18411,6 +18529,18 @@ func (s *M2tsSettings) SetProgramNumber(v int64) *M2tsSettings { return s } +// SetPtsOffset sets the PtsOffset field's value. 
+func (s *M2tsSettings) SetPtsOffset(v int64) *M2tsSettings { + s.PtsOffset = &v + return s +} + +// SetPtsOffsetMode sets the PtsOffsetMode field's value. +func (s *M2tsSettings) SetPtsOffsetMode(v string) *M2tsSettings { + s.PtsOffsetMode = &v + return s +} + // SetRateMode sets the RateMode field's value. func (s *M2tsSettings) SetRateMode(v string) *M2tsSettings { s.RateMode = &v @@ -18541,6 +18671,20 @@ type M3u8Settings struct { // The value of the program number field in the Program Map Table. ProgramNumber *int64 `locationName:"programNumber" type:"integer"` + // Manually specify the initial PTS offset, in seconds, when you set PTS offset + // to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default + // value 2. + PtsOffset *int64 `locationName:"ptsOffset" type:"integer"` + + // Specify the initial presentation timestamp (PTS) offset for your transport + // stream output. To let MediaConvert automatically determine the initial PTS + // offset: Keep the default value, Auto. We recommend that you choose Auto for + // the widest player compatibility. The initial PTS will be at least two seconds + // and vary depending on your output's bitrate, HRD buffer size and HRD buffer + // initial fill percentage. To manually specify an initial PTS offset: Choose + // Seconds. Then specify the number of seconds with PTS offset. + PtsOffsetMode *string `locationName:"ptsOffsetMode" type:"string" enum:"TsPtsOffset"` + // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` @@ -18693,6 +18837,18 @@ func (s *M3u8Settings) SetProgramNumber(v int64) *M3u8Settings { return s } +// SetPtsOffset sets the PtsOffset field's value. +func (s *M3u8Settings) SetPtsOffset(v int64) *M3u8Settings { + s.PtsOffset = &v + return s +} + +// SetPtsOffsetMode sets the PtsOffsetMode field's value. 
+func (s *M3u8Settings) SetPtsOffsetMode(v string) *M3u8Settings { + s.PtsOffsetMode = &v + return s +} + // SetScte35Pid sets the Scte35Pid field's value. func (s *M3u8Settings) SetScte35Pid(v int64) *M3u8Settings { s.Scte35Pid = &v @@ -22943,6 +23099,9 @@ type S3DestinationSettings struct { // Settings for how your job outputs are encrypted as they are uploaded to Amazon // S3. Encryption *S3EncryptionSettings `locationName:"encryption" type:"structure"` + + // Specify the S3 storage class to use for this destination. + StorageClass *string `locationName:"storageClass" type:"string" enum:"S3StorageClass"` } // String returns the string representation. @@ -22975,6 +23134,12 @@ func (s *S3DestinationSettings) SetEncryption(v *S3EncryptionSettings) *S3Destin return s } +// SetStorageClass sets the StorageClass field's value. +func (s *S3DestinationSettings) SetStorageClass(v string) *S3DestinationSettings { + s.StorageClass = &v + return s +} + // Settings for how your job outputs are encrypted as they are uploaded to Amazon // S3. 
type S3EncryptionSettings struct { @@ -27520,6 +27685,45 @@ const ( // AudioChannelTagVhr is a AudioChannelTag enum value AudioChannelTagVhr = "VHR" + + // AudioChannelTagTbl is a AudioChannelTag enum value + AudioChannelTagTbl = "TBL" + + // AudioChannelTagTbc is a AudioChannelTag enum value + AudioChannelTagTbc = "TBC" + + // AudioChannelTagTbr is a AudioChannelTag enum value + AudioChannelTagTbr = "TBR" + + // AudioChannelTagRsl is a AudioChannelTag enum value + AudioChannelTagRsl = "RSL" + + // AudioChannelTagRsr is a AudioChannelTag enum value + AudioChannelTagRsr = "RSR" + + // AudioChannelTagLw is a AudioChannelTag enum value + AudioChannelTagLw = "LW" + + // AudioChannelTagRw is a AudioChannelTag enum value + AudioChannelTagRw = "RW" + + // AudioChannelTagLfe2 is a AudioChannelTag enum value + AudioChannelTagLfe2 = "LFE2" + + // AudioChannelTagLt is a AudioChannelTag enum value + AudioChannelTagLt = "LT" + + // AudioChannelTagRt is a AudioChannelTag enum value + AudioChannelTagRt = "RT" + + // AudioChannelTagHi is a AudioChannelTag enum value + AudioChannelTagHi = "HI" + + // AudioChannelTagNar is a AudioChannelTag enum value + AudioChannelTagNar = "NAR" + + // AudioChannelTagM is a AudioChannelTag enum value + AudioChannelTagM = "M" ) // AudioChannelTag_Values returns all elements of the AudioChannelTag enum @@ -27540,6 +27744,19 @@ func AudioChannelTag_Values() []string { AudioChannelTagVhl, AudioChannelTagVhc, AudioChannelTagVhr, + AudioChannelTagTbl, + AudioChannelTagTbc, + AudioChannelTagTbr, + AudioChannelTagRsl, + AudioChannelTagRsr, + AudioChannelTagLw, + AudioChannelTagRw, + AudioChannelTagLfe2, + AudioChannelTagLt, + AudioChannelTagRt, + AudioChannelTagHi, + AudioChannelTagNar, + AudioChannelTagM, } } @@ -27584,6 +27801,9 @@ const ( // AudioCodecPassthrough is a AudioCodec enum value AudioCodecPassthrough = "PASSTHROUGH" + + // AudioCodecFlac is a AudioCodec enum value + AudioCodecFlac = "FLAC" ) // AudioCodec_Values returns all elements of the 
AudioCodec enum @@ -27600,6 +27820,7 @@ func AudioCodec_Values() []string { AudioCodecVorbis, AudioCodecOpus, AudioCodecPassthrough, + AudioCodecFlac, } } @@ -27866,6 +28087,28 @@ func Av1BitDepth_Values() []string { } } +// Film grain synthesis replaces film grain present in your content with similar +// quality synthesized AV1 film grain. We recommend that you choose Enabled +// to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. +// For QVBR quality level 9 or 10 outputs we recommend that you keep the default +// value, Disabled. When you include Film grain synthesis, you cannot include +// the Noise reducer preprocessor. +const ( + // Av1FilmGrainSynthesisDisabled is a Av1FilmGrainSynthesis enum value + Av1FilmGrainSynthesisDisabled = "DISABLED" + + // Av1FilmGrainSynthesisEnabled is a Av1FilmGrainSynthesis enum value + Av1FilmGrainSynthesisEnabled = "ENABLED" +) + +// Av1FilmGrainSynthesis_Values returns all elements of the Av1FilmGrainSynthesis enum +func Av1FilmGrainSynthesis_Values() []string { + return []string{ + Av1FilmGrainSynthesisDisabled, + Av1FilmGrainSynthesisEnabled, + } +} + // Use the Framerate setting to specify the frame rate for this output. If you // want to keep the same frame rate as the input video, choose Follow source. // If you want to do frame rate conversion, choose a frame rate from the dropdown @@ -36420,6 +36663,43 @@ func S3ServerSideEncryptionType_Values() []string { } } +// Specify the S3 storage class to use for this destination. 
+const ( + // S3StorageClassStandard is a S3StorageClass enum value + S3StorageClassStandard = "STANDARD" + + // S3StorageClassReducedRedundancy is a S3StorageClass enum value + S3StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // S3StorageClassStandardIa is a S3StorageClass enum value + S3StorageClassStandardIa = "STANDARD_IA" + + // S3StorageClassOnezoneIa is a S3StorageClass enum value + S3StorageClassOnezoneIa = "ONEZONE_IA" + + // S3StorageClassIntelligentTiering is a S3StorageClass enum value + S3StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // S3StorageClassGlacier is a S3StorageClass enum value + S3StorageClassGlacier = "GLACIER" + + // S3StorageClassDeepArchive is a S3StorageClass enum value + S3StorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// S3StorageClass_Values returns all elements of the S3StorageClass enum +func S3StorageClass_Values() []string { + return []string{ + S3StorageClassStandard, + S3StorageClassReducedRedundancy, + S3StorageClassStandardIa, + S3StorageClassOnezoneIa, + S3StorageClassIntelligentTiering, + S3StorageClassGlacier, + S3StorageClassDeepArchive, + } +} + // Specify how MediaConvert limits the color sample range for this output. To // create a limited range output from a full range input: Choose Limited range // squeeze. For full range inputs, MediaConvert performs a linear offset to @@ -36745,6 +37025,29 @@ func TimedMetadata_Values() []string { } } +// Specify the initial presentation timestamp (PTS) offset for your transport +// stream output. To let MediaConvert automatically determine the initial PTS +// offset: Keep the default value, Auto. We recommend that you choose Auto for +// the widest player compatibility. The initial PTS will be at least two seconds +// and vary depending on your output's bitrate, HRD buffer size and HRD buffer +// initial fill percentage. To manually specify an initial PTS offset: Choose +// Seconds. Then specify the number of seconds with PTS offset. 
+const ( + // TsPtsOffsetAuto is a TsPtsOffset enum value + TsPtsOffsetAuto = "AUTO" + + // TsPtsOffsetSeconds is a TsPtsOffset enum value + TsPtsOffsetSeconds = "SECONDS" +) + +// TsPtsOffset_Values returns all elements of the TsPtsOffset enum +func TsPtsOffset_Values() []string { + return []string{ + TsPtsOffsetAuto, + TsPtsOffsetSeconds, + } +} + // Pass through style and position information from a TTML-like input source // (TTML, IMSC, SMPTE-TT) to the TTML output. const ( diff --git a/service/medialive/api.go b/service/medialive/api.go index 3b27dab60d2..a406d3ea099 100644 --- a/service/medialive/api.go +++ b/service/medialive/api.go @@ -6342,6 +6342,10 @@ func (s *AacSettings) SetVbrQuality(v string) *AacSettings { type Ac3Settings struct { _ struct{} `type:"structure"` + // Applies a 3 dB attenuation to the surround channels. Applies only when the + // coding mode parameter is CODING_MODE_3_2_LFE. + AttenuationControl *string `locationName:"attenuationControl" type:"string" enum:"Ac3AttenuationControl"` + // Average bitrate in bits/second. Valid bitrates depend on the coding mode. Bitrate *float64 `locationName:"bitrate" type:"double"` @@ -6401,6 +6405,12 @@ func (s *Ac3Settings) Validate() error { return nil } +// SetAttenuationControl sets the AttenuationControl field's value. +func (s *Ac3Settings) SetAttenuationControl(v string) *Ac3Settings { + s.AttenuationControl = &v + return s +} + // SetBitrate sets the Bitrate field's value. func (s *Ac3Settings) SetBitrate(v float64) *Ac3Settings { s.Bitrate = &v @@ -22821,6 +22831,16 @@ type M3u8Settings struct { // This parameter is unused and deprecated. EcmPid *string `locationName:"ecmPid" type:"string"` + // If set to passthrough, passes any KLV data from the input source to this + // output. + KlvBehavior *string `locationName:"klvBehavior" type:"string" enum:"M3u8KlvBehavior"` + + // Packet Identifier (PID) for input source KLV data to this output. 
Multiple + // values are accepted, and can be entered in ranges and/or by comma separation. + // Can be entered as decimal or hexadecimal values. Each PID specified must + // be in the range of 32 (or 0x20)..8182 (or 0x1ff6). + KlvDataPids *string `locationName:"klvDataPids" type:"string"` + // If set to passthrough, Nielsen inaudible tones for media tracking will be // detected in the input audio and an equivalent ID3 tag will be inserted in // the output. @@ -22915,6 +22935,18 @@ func (s *M3u8Settings) SetEcmPid(v string) *M3u8Settings { return s } +// SetKlvBehavior sets the KlvBehavior field's value. +func (s *M3u8Settings) SetKlvBehavior(v string) *M3u8Settings { + s.KlvBehavior = &v + return s +} + +// SetKlvDataPids sets the KlvDataPids field's value. +func (s *M3u8Settings) SetKlvDataPids(v string) *M3u8Settings { + s.KlvDataPids = &v + return s +} + // SetNielsenId3Behavior sets the NielsenId3Behavior field's value. func (s *M3u8Settings) SetNielsenId3Behavior(v string) *M3u8Settings { s.NielsenId3Behavior = &v @@ -27354,6 +27386,13 @@ type RtmpGroupSettings struct { // 1 video will be passed. CaptionData *string `locationName:"captionData" type:"string" enum:"RtmpCaptionData"` + // Applies only when the rate control mode (in the codec settings) is CBR (constant + // bit rate). Controls whether the RTMP output stream is padded (with FILL NAL + // units) in order to achieve a constant bit rate that is truly constant. When + // there is no padding, the bandwidth varies (up to the bitrate value in the + // codec settings). We recommend that you choose Auto. + IncludeFillerNalUnits *string `locationName:"includeFillerNalUnits" type:"string" enum:"IncludeFillerNalUnits"` + // Controls the behavior of this RTMP group if input becomes unavailable.- emitOutput: // Emit a slate until input returns.- pauseOutput: Stop transmitting data until // input returns. This does not close the underlying RTMP connection. 
@@ -27425,6 +27464,12 @@ func (s *RtmpGroupSettings) SetCaptionData(v string) *RtmpGroupSettings { return s } +// SetIncludeFillerNalUnits sets the IncludeFillerNalUnits field's value. +func (s *RtmpGroupSettings) SetIncludeFillerNalUnits(v string) *RtmpGroupSettings { + s.IncludeFillerNalUnits = &v + return s +} + // SetInputLossAction sets the InputLossAction field's value. func (s *RtmpGroupSettings) SetInputLossAction(v string) *RtmpGroupSettings { s.InputLossAction = &v @@ -32869,6 +32914,23 @@ func AacVbrQuality_Values() []string { } } +// Ac3 Attenuation Control +const ( + // Ac3AttenuationControlAttenuate3Db is a Ac3AttenuationControl enum value + Ac3AttenuationControlAttenuate3Db = "ATTENUATE_3_DB" + + // Ac3AttenuationControlNone is a Ac3AttenuationControl enum value + Ac3AttenuationControlNone = "NONE" +) + +// Ac3AttenuationControl_Values returns all elements of the Ac3AttenuationControl enum +func Ac3AttenuationControl_Values() []string { + return []string{ + Ac3AttenuationControlAttenuate3Db, + Ac3AttenuationControlNone, + } +} + // Ac3 Bitstream Mode const ( // Ac3BitstreamModeCommentary is a Ac3BitstreamMode enum value @@ -35730,6 +35792,27 @@ func IFrameOnlyPlaylistType_Values() []string { } } +// Include Filler Nal Units +const ( + // IncludeFillerNalUnitsAuto is a IncludeFillerNalUnits enum value + IncludeFillerNalUnitsAuto = "AUTO" + + // IncludeFillerNalUnitsDrop is a IncludeFillerNalUnits enum value + IncludeFillerNalUnitsDrop = "DROP" + + // IncludeFillerNalUnitsInclude is a IncludeFillerNalUnits enum value + IncludeFillerNalUnitsInclude = "INCLUDE" +) + +// IncludeFillerNalUnits_Values returns all elements of the IncludeFillerNalUnits enum +func IncludeFillerNalUnits_Values() []string { + return []string{ + IncludeFillerNalUnitsAuto, + IncludeFillerNalUnitsDrop, + IncludeFillerNalUnitsInclude, + } +} + // A standard input has two sources and a single pipeline input only has one. 
const ( // InputClassStandard is a InputClass enum value @@ -36668,6 +36751,23 @@ func M2tsTimedMetadataBehavior_Values() []string { } } +// M3u8 Klv Behavior +const ( + // M3u8KlvBehaviorNoPassthrough is a M3u8KlvBehavior enum value + M3u8KlvBehaviorNoPassthrough = "NO_PASSTHROUGH" + + // M3u8KlvBehaviorPassthrough is a M3u8KlvBehavior enum value + M3u8KlvBehaviorPassthrough = "PASSTHROUGH" +) + +// M3u8KlvBehavior_Values returns all elements of the M3u8KlvBehavior enum +func M3u8KlvBehavior_Values() []string { + return []string{ + M3u8KlvBehaviorNoPassthrough, + M3u8KlvBehaviorPassthrough, + } +} + // M3u8 Nielsen Id3 Behavior const ( // M3u8NielsenId3BehaviorNoPassthrough is a M3u8NielsenId3Behavior enum value diff --git a/service/mediatailor/api.go b/service/mediatailor/api.go index 0c0ff2c69fe..8439c1c7fe6 100644 --- a/service/mediatailor/api.go +++ b/service/mediatailor/api.go @@ -3774,7 +3774,7 @@ type AccessConfiguration struct { _ struct{} `type:"structure"` // The type of authentication used to access content from HttpConfiguration::BaseUrl - // on your source location. Accepted value: S3_SIGV4. + // on your source location. // // S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style // access. If your source location base URL is an Amazon S3 bucket, MediaTailor @@ -3795,6 +3795,40 @@ type AccessConfiguration struct { // • The caller of the API must have s3:GetObject IAM permissions to read // all top level manifests referenced by your MediaTailor VodSource packaging // configurations. + // + // AUTODETECT_SIGV4 - AWS Signature Version 4 authentication for a set of supported + // services: MediaPackage Version 2 and Amazon S3 hosted virtual-style access. + // If your source location base URL is a MediaPackage Version 2 endpoint or + // an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) + // authentication to access the resource where your source content is stored. 
+ // + // Before you can use AUTODETECT_SIGV4 with a MediaPackage Version 2 endpoint, + // you must meet these requirements: + // + // • You must grant MediaTailor access to your MediaPackage endpoint by granting + // mediatailor.amazonaws.com principal access in an Origin Access policy on + // the endpoint. + // + // • Your MediaTailor source location base URL must be a MediaPackage V2 endpoint. + // + // • The caller of the API must have mediapackagev2:GetObject IAM permissions + // to read all top level manifests referenced by the MediaTailor source packaging + // configurations. + // + // Before you can use AUTODETECT_SIGV4 with an Amazon S3 bucket, you must meet + // these requirements: + // + // • You must grant MediaTailor access to your S3 bucket by granting mediatailor.amazonaws.com + // principal access in IAM. For more information about configuring access in + // IAM, see Access management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) + // in the IAM User Guide.. + // + // • The mediatailor.amazonaws.com service principal must have permissions + // to read all top-level manifests referenced by the VodSource packaging configurations. + // + // • The caller of the API must have s3:GetObject IAM permissions to read + // all top level manifests referenced by your MediaTailor VodSource packaging + // configurations. AccessType *string `type:"string" enum:"AccessType"` // AWS Secrets Manager access token configuration parameters. 
@@ -13255,6 +13289,9 @@ const ( // AccessTypeSecretsManagerAccessToken is a AccessType enum value AccessTypeSecretsManagerAccessToken = "SECRETS_MANAGER_ACCESS_TOKEN" + + // AccessTypeAutodetectSigv4 is a AccessType enum value + AccessTypeAutodetectSigv4 = "AUTODETECT_SIGV4" ) // AccessType_Values returns all elements of the AccessType enum @@ -13262,6 +13299,7 @@ func AccessType_Values() []string { return []string{ AccessTypeS3Sigv4, AccessTypeSecretsManagerAccessToken, + AccessTypeAutodetectSigv4, } } diff --git a/service/quicksight/api.go b/service/quicksight/api.go index d13ef162cfb..026881c2a03 100644 --- a/service/quicksight/api.go +++ b/service/quicksight/api.go @@ -6733,6 +6733,12 @@ func (c *QuickSight) DescribeFolderPermissionsRequest(input *DescribeFolderPermi Name: opDescribeFolderPermissions, HTTPMethod: "GET", HTTPPath: "/accounts/{AwsAccountId}/folders/{FolderId}/permissions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -6773,6 +6779,9 @@ func (c *QuickSight) DescribeFolderPermissionsRequest(input *DescribeFolderPermi // - ThrottlingException // Access is throttled. // +// - InvalidNextTokenException +// The NextToken value isn't valid. +// // - UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -6804,6 +6813,57 @@ func (c *QuickSight) DescribeFolderPermissionsWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeFolderPermissionsPages iterates over the pages of a DescribeFolderPermissions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFolderPermissions method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFolderPermissions operation. +// pageNum := 0 +// err := client.DescribeFolderPermissionsPages(params, +// func(page *quicksight.DescribeFolderPermissionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *QuickSight) DescribeFolderPermissionsPages(input *DescribeFolderPermissionsInput, fn func(*DescribeFolderPermissionsOutput, bool) bool) error { + return c.DescribeFolderPermissionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFolderPermissionsPagesWithContext same as DescribeFolderPermissionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DescribeFolderPermissionsPagesWithContext(ctx aws.Context, input *DescribeFolderPermissionsInput, fn func(*DescribeFolderPermissionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFolderPermissionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFolderPermissionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFolderPermissionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeFolderResolvedPermissions = "DescribeFolderResolvedPermissions" // DescribeFolderResolvedPermissionsRequest generates a "aws/request.Request" representing the @@ -6834,6 +6894,12 @@ func (c *QuickSight) DescribeFolderResolvedPermissionsRequest(input *DescribeFol Name: opDescribeFolderResolvedPermissions, HTTPMethod: "GET", HTTPPath: "/accounts/{AwsAccountId}/folders/{FolderId}/resolved-permissions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -6875,6 +6941,9 @@ func (c *QuickSight) DescribeFolderResolvedPermissionsRequest(input *DescribeFol // - ThrottlingException // Access is throttled. // +// - InvalidNextTokenException +// The NextToken value isn't valid. +// // - UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -6906,6 +6975,57 @@ func (c *QuickSight) DescribeFolderResolvedPermissionsWithContext(ctx aws.Contex return out, req.Send() } +// DescribeFolderResolvedPermissionsPages iterates over the pages of a DescribeFolderResolvedPermissions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFolderResolvedPermissions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFolderResolvedPermissions operation. 
+// pageNum := 0 +// err := client.DescribeFolderResolvedPermissionsPages(params, +// func(page *quicksight.DescribeFolderResolvedPermissionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *QuickSight) DescribeFolderResolvedPermissionsPages(input *DescribeFolderResolvedPermissionsInput, fn func(*DescribeFolderResolvedPermissionsOutput, bool) bool) error { + return c.DescribeFolderResolvedPermissionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFolderResolvedPermissionsPagesWithContext same as DescribeFolderResolvedPermissionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) DescribeFolderResolvedPermissionsPagesWithContext(ctx aws.Context, input *DescribeFolderResolvedPermissionsInput, fn func(*DescribeFolderResolvedPermissionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFolderResolvedPermissionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFolderResolvedPermissionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFolderResolvedPermissionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeGroup = "DescribeGroup" // DescribeGroupRequest generates a "aws/request.Request" representing the @@ -10572,6 +10692,12 @@ func (c *QuickSight) ListFolderMembersRequest(input *ListFolderMembersInput) (re Name: opListFolderMembers, HTTPMethod: "GET", HTTPPath: "/accounts/{AwsAccountId}/folders/{FolderId}/members", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -10646,6 +10772,57 @@ func (c *QuickSight) ListFolderMembersWithContext(ctx aws.Context, input *ListFo return out, req.Send() } +// ListFolderMembersPages iterates over the pages of a ListFolderMembers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFolderMembers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFolderMembers operation. +// pageNum := 0 +// err := client.ListFolderMembersPages(params, +// func(page *quicksight.ListFolderMembersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *QuickSight) ListFolderMembersPages(input *ListFolderMembersInput, fn func(*ListFolderMembersOutput, bool) bool) error { + return c.ListFolderMembersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListFolderMembersPagesWithContext same as ListFolderMembersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListFolderMembersPagesWithContext(ctx aws.Context, input *ListFolderMembersInput, fn func(*ListFolderMembersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListFolderMembersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListFolderMembersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListFolderMembersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListFolders = "ListFolders" // ListFoldersRequest generates a "aws/request.Request" representing the @@ -10676,6 +10853,12 @@ func (c *QuickSight) ListFoldersRequest(input *ListFoldersInput) (req *request.R Name: opListFolders, HTTPMethod: "GET", HTTPPath: "/accounts/{AwsAccountId}/folders", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -10750,6 +10933,57 @@ func (c *QuickSight) ListFoldersWithContext(ctx aws.Context, input *ListFoldersI return out, req.Send() } +// ListFoldersPages iterates over the pages of a ListFolders operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFolders method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFolders operation. 
+// pageNum := 0 +// err := client.ListFoldersPages(params, +// func(page *quicksight.ListFoldersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *QuickSight) ListFoldersPages(input *ListFoldersInput, fn func(*ListFoldersOutput, bool) bool) error { + return c.ListFoldersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListFoldersPagesWithContext same as ListFoldersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListFoldersPagesWithContext(ctx aws.Context, input *ListFoldersInput, fn func(*ListFoldersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListFoldersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListFoldersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListFoldersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListGroupMemberships = "ListGroupMemberships" // ListGroupMembershipsRequest generates a "aws/request.Request" representing the @@ -14491,6 +14725,12 @@ func (c *QuickSight) SearchFoldersRequest(input *SearchFoldersInput) (req *reque Name: opSearchFolders, HTTPMethod: "POST", HTTPPath: "/accounts/{AwsAccountId}/search/folders", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -14569,6 +14809,57 @@ func (c *QuickSight) SearchFoldersWithContext(ctx aws.Context, input *SearchFold return out, req.Send() } +// SearchFoldersPages iterates over the pages of a SearchFolders operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchFolders method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchFolders operation. +// pageNum := 0 +// err := client.SearchFoldersPages(params, +// func(page *quicksight.SearchFoldersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *QuickSight) SearchFoldersPages(input *SearchFoldersInput, fn func(*SearchFoldersOutput, bool) bool) error { + return c.SearchFoldersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchFoldersPagesWithContext same as SearchFoldersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) SearchFoldersPagesWithContext(ctx aws.Context, input *SearchFoldersInput, fn func(*SearchFoldersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchFoldersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchFoldersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchFoldersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opSearchGroups = "SearchGroups" // SearchGroupsRequest generates a "aws/request.Request" representing the @@ -15009,6 +15300,8 @@ func (c *QuickSight) StartDashboardSnapshotJobRequest(input *StartDashboardSnaps // // - 1 paginated PDF // +// - 1 Excel workbook +// // - 5 CSVs // // Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once @@ -18814,6 +19107,33 @@ func (s *AggregationSortConfiguration) SetSortDirection(v string) *AggregationSo return s } +// The configuration for applying a filter to all sheets. You can apply this +// filter to all visuals on every sheet. +// +// This is a union type structure. For this structure to be valid, only one +// of the attributes can be defined. +type AllSheetsFilterScopeConfiguration struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AllSheetsFilterScopeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AllSheetsFilterScopeConfiguration) GoString() string { + return s.String() +} + // The parameters for OpenSearch. type AmazonElasticsearchParameters struct { _ struct{} `type:"structure"` @@ -29958,6 +30278,10 @@ type CreateFolderInput struct { // To specify no permissions, omit Permissions. Permissions []*ResourcePermission `min:"1" type:"list"` + // An optional parameter that determines the sharing scope of the folder. The + // default value for this parameter is ACCOUNT. + SharingModel *string `type:"string" enum:"SharingModel"` + // Tags for the folder. Tags []*Tag `min:"1" type:"list"` } @@ -30067,6 +30391,12 @@ func (s *CreateFolderInput) SetPermissions(v []*ResourcePermission) *CreateFolde return s } +// SetSharingModel sets the SharingModel field's value. +func (s *CreateFolderInput) SetSharingModel(v string) *CreateFolderInput { + s.SharingModel = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateFolderInput) SetTags(v []*Tag) *CreateFolderInput { s.Tags = v @@ -30086,12 +30416,12 @@ type CreateFolderMembershipInput struct { // FolderId is a required field FolderId *string `location:"uri" locationName:"FolderId" min:"1" type:"string" required:"true"` - // The ID of the asset (the dashboard, analysis, or dataset). + // The ID of the asset that you want to add to the folder. // // MemberId is a required field MemberId *string `location:"uri" locationName:"MemberId" min:"1" type:"string" required:"true"` - // The type of the member, including DASHBOARD, ANALYSIS, and DATASET. + // The member type of the asset that you want to add to a folder. 
// // MemberType is a required field MemberType *string `location:"uri" locationName:"MemberType" type:"string" required:"true" enum:"MemberType"` @@ -40393,13 +40723,12 @@ type DeleteFolderMembershipInput struct { // FolderId is a required field FolderId *string `location:"uri" locationName:"FolderId" min:"1" type:"string" required:"true"` - // The ID of the asset (the dashboard, analysis, or dataset) that you want to - // delete. + // The ID of the asset that you want to delete. // // MemberId is a required field MemberId *string `location:"uri" locationName:"MemberId" min:"1" type:"string" required:"true"` - // The type of the member, including DASHBOARD, ANALYSIS, and DATASET + // The member type of the asset that you want to delete from a folder. // // MemberType is a required field MemberType *string `location:"uri" locationName:"MemberType" type:"string" required:"true" enum:"MemberType"` @@ -45391,6 +45720,15 @@ type DescribeFolderPermissionsInput struct { // // FolderId is a required field FolderId *string `location:"uri" locationName:"FolderId" min:"1" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace of the folder whose permissions you want described. + Namespace *string `location:"querystring" locationName:"namespace" type:"string"` + + // A pagination token for the next set of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation. 
@@ -45426,6 +45764,9 @@ func (s *DescribeFolderPermissionsInput) Validate() error { if s.FolderId != nil && len(*s.FolderId) < 1 { invalidParams.Add(request.NewErrParamMinLen("FolderId", 1)) } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -45445,6 +45786,24 @@ func (s *DescribeFolderPermissionsInput) SetFolderId(v string) *DescribeFolderPe return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFolderPermissionsInput) SetMaxResults(v int64) *DescribeFolderPermissionsInput { + s.MaxResults = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DescribeFolderPermissionsInput) SetNamespace(v string) *DescribeFolderPermissionsInput { + s.Namespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFolderPermissionsInput) SetNextToken(v string) *DescribeFolderPermissionsInput { + s.NextToken = &v + return s +} + type DescribeFolderPermissionsOutput struct { _ struct{} `type:"structure"` @@ -45454,6 +45813,10 @@ type DescribeFolderPermissionsOutput struct { // The ID of the folder. FolderId *string `min:"1" type:"string"` + // The pagination token for the next set of results, or null if there are no + // more results. + NextToken *string `type:"string"` + // Information about the permissions on the folder. Permissions []*ResourcePermission `min:"1" type:"list"` @@ -45494,6 +45857,12 @@ func (s *DescribeFolderPermissionsOutput) SetFolderId(v string) *DescribeFolderP return s } +// SetNextToken sets the NextToken field's value. +func (s *DescribeFolderPermissionsOutput) SetNextToken(v string) *DescribeFolderPermissionsOutput { + s.NextToken = &v + return s +} + // SetPermissions sets the Permissions field's value. 
func (s *DescribeFolderPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeFolderPermissionsOutput { s.Permissions = v @@ -45524,6 +45893,15 @@ type DescribeFolderResolvedPermissionsInput struct { // // FolderId is a required field FolderId *string `location:"uri" locationName:"FolderId" min:"1" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace of the folder whose permissions you want described. + Namespace *string `location:"querystring" locationName:"namespace" type:"string"` + + // A pagination token for the next set of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation. @@ -45559,6 +45937,9 @@ func (s *DescribeFolderResolvedPermissionsInput) Validate() error { if s.FolderId != nil && len(*s.FolderId) < 1 { invalidParams.Add(request.NewErrParamMinLen("FolderId", 1)) } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -45578,6 +45959,24 @@ func (s *DescribeFolderResolvedPermissionsInput) SetFolderId(v string) *Describe return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFolderResolvedPermissionsInput) SetMaxResults(v int64) *DescribeFolderResolvedPermissionsInput { + s.MaxResults = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DescribeFolderResolvedPermissionsInput) SetNamespace(v string) *DescribeFolderResolvedPermissionsInput { + s.Namespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeFolderResolvedPermissionsInput) SetNextToken(v string) *DescribeFolderResolvedPermissionsInput { + s.NextToken = &v + return s +} + type DescribeFolderResolvedPermissionsOutput struct { _ struct{} `type:"structure"` @@ -45587,6 +45986,10 @@ type DescribeFolderResolvedPermissionsOutput struct { // The ID of the folder. FolderId *string `min:"1" type:"string"` + // A pagination token for the next set of results, or null if there are no more + // results. + NextToken *string `type:"string"` + // Information about the permissions for the folder. Permissions []*ResourcePermission `min:"1" type:"list"` @@ -45627,6 +46030,12 @@ func (s *DescribeFolderResolvedPermissionsOutput) SetFolderId(v string) *Describ return s } +// SetNextToken sets the NextToken field's value. +func (s *DescribeFolderResolvedPermissionsOutput) SetNextToken(v string) *DescribeFolderResolvedPermissionsOutput { + s.NextToken = &v + return s +} + // SetPermissions sets the Permissions field's value. func (s *DescribeFolderResolvedPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeFolderResolvedPermissionsOutput { s.Permissions = v @@ -51798,6 +52207,9 @@ func (s *FilterRelativeDateTimeControl) SetTitle(v string) *FilterRelativeDateTi type FilterScopeConfiguration struct { _ struct{} `type:"structure"` + // The configuration for applying a filter to all sheets. + AllSheets *AllSheetsFilterScopeConfiguration `type:"structure"` + // The configuration for applying a filter to specific sheets. SelectedSheets *SelectedSheetsFilterScopeConfiguration `type:"structure"` } @@ -51835,6 +52247,12 @@ func (s *FilterScopeConfiguration) Validate() error { return nil } +// SetAllSheets sets the AllSheets field's value. +func (s *FilterScopeConfiguration) SetAllSheets(v *AllSheetsFilterScopeConfiguration) *FilterScopeConfiguration { + s.AllSheets = v + return s +} + // SetSelectedSheets sets the SelectedSheets field's value. 
func (s *FilterScopeConfiguration) SetSelectedSheets(v *SelectedSheetsFilterScopeConfiguration) *FilterScopeConfiguration { s.SelectedSheets = v @@ -52259,6 +52677,9 @@ type Folder struct { // A display name for the folder. Name *string `min:"1" type:"string"` + + // The sharing scope of the folder. + SharingModel *string `type:"string" enum:"SharingModel"` } // String returns the string representation. @@ -52321,6 +52742,12 @@ func (s *Folder) SetName(v string) *Folder { return s } +// SetSharingModel sets the SharingModel field's value. +func (s *Folder) SetSharingModel(v string) *Folder { + s.SharingModel = &v + return s +} + // An asset in a Amazon QuickSight folder, such as a dashboard, analysis, or // dataset. type FolderMember struct { @@ -52478,6 +52905,9 @@ type FolderSummary struct { // The display name of the folder. Name *string `min:"1" type:"string"` + + // The sharing scope of the folder. + SharingModel *string `type:"string" enum:"SharingModel"` } // String returns the string representation. @@ -52534,6 +52964,12 @@ func (s *FolderSummary) SetName(v string) *FolderSummary { return s } +// SetSharingModel sets the SharingModel field's value. +func (s *FolderSummary) SetSharingModel(v string) *FolderSummary { + s.SharingModel = &v + return s +} + // Determines the font settings. type Font struct { _ struct{} `type:"structure"` @@ -52734,9 +53170,7 @@ type ForecastComputation struct { Seasonality *string `type:"string" enum:"ForecastComputationSeasonality"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` // The upper boundary setup of a forecast computation. 
UpperBoundary *float64 `type:"double"` @@ -52781,9 +53215,6 @@ func (s *ForecastComputation) Validate() error { if s.PredictionInterval != nil && *s.PredictionInterval < 50 { invalidParams.Add(request.NewErrParamMinValue("PredictionInterval", 50)) } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.Time != nil { if err := s.Time.Validate(); err != nil { invalidParams.AddNested("Time", err.(request.ErrInvalidParams)) @@ -57055,9 +57486,7 @@ type GrowthRateComputation struct { PeriodSize *int64 `min:"2" type:"integer"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` // The value field that is used in a computation. Value *MeasureField `type:"structure"` @@ -57093,9 +57522,6 @@ func (s *GrowthRateComputation) Validate() error { if s.PeriodSize != nil && *s.PeriodSize < 2 { invalidParams.Add(request.NewErrParamMinValue("PeriodSize", 2)) } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.Time != nil { if err := s.Time.Validate(); err != nil { invalidParams.AddNested("Time", err.(request.ErrInvalidParams)) @@ -66451,9 +66877,7 @@ type MaximumMinimumComputation struct { Name *string `type:"string"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` // The type of computation. 
Choose one of the following options: // @@ -66495,9 +66919,6 @@ func (s *MaximumMinimumComputation) Validate() error { if s.ComputationId != nil && len(*s.ComputationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ComputationId", 1)) } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -66689,22 +67110,16 @@ type MetricComparisonComputation struct { ComputationId *string `min:"1" type:"string" required:"true"` // The field that is used in a metric comparison from value setup. - // - // FromValue is a required field - FromValue *MeasureField `type:"structure" required:"true"` + FromValue *MeasureField `type:"structure"` // The name of a computation. Name *string `type:"string"` // The field that is used in a metric comparison to value setup. - // - // TargetValue is a required field - TargetValue *MeasureField `type:"structure" required:"true"` + TargetValue *MeasureField `type:"structure"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` } // String returns the string representation. 
@@ -66734,15 +67149,6 @@ func (s *MetricComparisonComputation) Validate() error { if s.ComputationId != nil && len(*s.ComputationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ComputationId", 1)) } - if s.FromValue == nil { - invalidParams.Add(request.NewErrParamRequired("FromValue")) - } - if s.TargetValue == nil { - invalidParams.Add(request.NewErrParamRequired("TargetValue")) - } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.FromValue != nil { if err := s.FromValue.Validate(); err != nil { invalidParams.AddNested("FromValue", err.(request.ErrInvalidParams)) @@ -70215,9 +70621,7 @@ type PeriodOverPeriodComputation struct { Name *string `type:"string"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` // The value field that is used in a computation. Value *MeasureField `type:"structure"` @@ -70250,9 +70654,6 @@ func (s *PeriodOverPeriodComputation) Validate() error { if s.ComputationId != nil && len(*s.ComputationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ComputationId", 1)) } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.Time != nil { if err := s.Time.Validate(); err != nil { invalidParams.AddNested("Time", err.(request.ErrInvalidParams)) @@ -70315,9 +70716,7 @@ type PeriodToDateComputation struct { PeriodTimeGranularity *string `type:"string" enum:"TimeGranularity"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` // The value field that is used in a computation. 
Value *MeasureField `type:"structure"` @@ -70350,9 +70749,6 @@ func (s *PeriodToDateComputation) Validate() error { if s.ComputationId != nil && len(*s.ComputationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ComputationId", 1)) } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.Time != nil { if err := s.Time.Validate(); err != nil { invalidParams.AddNested("Time", err.(request.ErrInvalidParams)) @@ -81743,8 +82139,8 @@ func (s *SnapshotDestinationConfiguration) SetS3Destinations(v []*SnapshotS3Dest type SnapshotFile struct { _ struct{} `type:"structure"` - // The format of the snapshot file to be generated. You can choose between CSV - // or PDF. + // The format of the snapshot file to be generated. You can choose between CSV, + // Excel, or PDF. // // FormatType is a required field FormatType *string `type:"string" required:"true" enum:"SnapshotFileFormatType"` @@ -81752,7 +82148,8 @@ type SnapshotFile struct { // A list of SnapshotFileSheetSelection objects that contain information on // the dashboard sheet that is exported. These objects provide information about // the snapshot artifacts that are generated during the job. This structure - // can hold a maximum of 5 CSV configurations or 1 configuration for PDF. + // can hold a maximum of 5 CSV configurations, 5 Excel configurations, or 1 + // configuration for PDF. // // SheetSelections is a required field SheetSelections []*SnapshotFileSheetSelection `min:"1" type:"list" required:"true"` @@ -81886,20 +82283,24 @@ type SnapshotFileSheetSelection struct { // is required if the snapshot is a PDF. // // * SELECTED_VISUALS - Select the visual that you want to add to the snapshot. - // This value is required if the snapshot is a CSV. + // This value is required if the snapshot is a CSV or Excel workbook. 
// // SelectionScope is a required field SelectionScope *string `type:"string" required:"true" enum:"SnapshotFileSheetSelectionScope"` // The sheet ID of the dashboard to generate the snapshot artifact from. This - // value is required for CSV and PDF format types. + // value is required for CSV, Excel, and PDF format types. // // SheetId is a required field SheetId *string `min:"1" type:"string" required:"true"` - // A list of visual IDs that are located in the selected sheet. This structure - // supports tables and pivot tables. This structure is required if you are generating - // a CSV. You can add a maximum of 1 visual ID to this structure. + // A structure that lists the IDs of the visuals in the selected sheet. Supported + // visual types are table, pivot table visuals. This value is required if you + // are generating a CSV or Excel workbook. This value supports a maximum of + // 1 visual ID for CSV and 5 visual IDs across up to 5 sheet selections for + // Excel. If you are generating an Excel workbook, the order of the visual IDs + // provided in this structure determines the order of the worksheets in the + // Excel file. VisualIds []*string `min:"1" type:"list"` } @@ -84803,14 +85204,18 @@ func (s *TableFieldOption) SetWidth(v string) *TableFieldOption { return s } -// The field options for a table visual. +// The field options of a table visual. type TableFieldOptions struct { _ struct{} `type:"structure"` - // The order of field IDs of the field options for a table visual. + // The order of the field IDs that are configured as field options for a table + // visual. Order []*string `type:"list"` - // The selected field options for the table field options. + // The settings for the pinned columns of a table visual. + PinnedFieldOptions *TablePinnedFieldOptions `type:"structure"` + + // The field options to be configured to a table. 
SelectedFieldOptions []*TableFieldOption `type:"list"` } @@ -84858,6 +85263,12 @@ func (s *TableFieldOptions) SetOrder(v []*string) *TableFieldOptions { return s } +// SetPinnedFieldOptions sets the PinnedFieldOptions field's value. +func (s *TableFieldOptions) SetPinnedFieldOptions(v *TablePinnedFieldOptions) *TableFieldOptions { + s.PinnedFieldOptions = v + return s +} + // SetSelectedFieldOptions sets the SelectedFieldOptions field's value. func (s *TableFieldOptions) SetSelectedFieldOptions(v []*TableFieldOption) *TableFieldOptions { s.SelectedFieldOptions = v @@ -85152,6 +85563,38 @@ func (s *TablePaginatedReportOptions) SetVerticalOverflowVisibility(v string) *T return s } +// The settings for the pinned columns of a table visual. +type TablePinnedFieldOptions struct { + _ struct{} `type:"structure"` + + // A list of columns to be pinned to the left of a table visual. + PinnedLeftFields []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TablePinnedFieldOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TablePinnedFieldOptions) GoString() string { + return s.String() +} + +// SetPinnedLeftFields sets the PinnedLeftFields field's value. +func (s *TablePinnedFieldOptions) SetPinnedLeftFields(v []*string) *TablePinnedFieldOptions { + s.PinnedLeftFields = v + return s +} + // The conditional formatting of a table row. 
type TableRowConditionalFormatting struct { _ struct{} `type:"structure"` @@ -88464,9 +88907,7 @@ type TopBottomMoversComputation struct { _ struct{} `type:"structure"` // The category field that is used in a computation. - // - // Category is a required field - Category *DimensionField `type:"structure" required:"true"` + Category *DimensionField `type:"structure"` // The ID for a computation. // @@ -88483,9 +88924,7 @@ type TopBottomMoversComputation struct { SortOrder *string `type:"string" enum:"TopBottomSortOrder"` // The time field that is used in a computation. - // - // Time is a required field - Time *DimensionField `type:"structure" required:"true"` + Time *DimensionField `type:"structure"` // The computation type. Choose from the following options: // @@ -88521,9 +88960,6 @@ func (s TopBottomMoversComputation) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *TopBottomMoversComputation) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TopBottomMoversComputation"} - if s.Category == nil { - invalidParams.Add(request.NewErrParamRequired("Category")) - } if s.ComputationId == nil { invalidParams.Add(request.NewErrParamRequired("ComputationId")) } @@ -88533,9 +88969,6 @@ func (s *TopBottomMoversComputation) Validate() error { if s.MoverSize != nil && *s.MoverSize < 1 { invalidParams.Add(request.NewErrParamMinValue("MoverSize", 1)) } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -88614,9 +89047,7 @@ type TopBottomRankedComputation struct { _ struct{} `type:"structure"` // The category field that is used in a computation. - // - // Category is a required field - Category *DimensionField `type:"structure" required:"true"` + Category *DimensionField `type:"structure"` // The ID for a computation. 
// @@ -88663,9 +89094,6 @@ func (s TopBottomRankedComputation) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *TopBottomRankedComputation) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TopBottomRankedComputation"} - if s.Category == nil { - invalidParams.Add(request.NewErrParamRequired("Category")) - } if s.ComputationId == nil { invalidParams.Add(request.NewErrParamRequired("ComputationId")) } @@ -90136,9 +90564,7 @@ type TotalAggregationComputation struct { Name *string `type:"string"` // The value field that is used in a computation. - // - // Value is a required field - Value *MeasureField `type:"structure" required:"true"` + Value *MeasureField `type:"structure"` } // String returns the string representation. @@ -90168,9 +90594,6 @@ func (s *TotalAggregationComputation) Validate() error { if s.ComputationId != nil && len(*s.ComputationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ComputationId", 1)) } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } if s.Value != nil { if err := s.Value.Validate(); err != nil { invalidParams.AddNested("Value", err.(request.ErrInvalidParams)) @@ -91329,9 +91752,7 @@ type UniqueValuesComputation struct { _ struct{} `type:"structure"` // The category field that is used in a computation. - // - // Category is a required field - Category *DimensionField `type:"structure" required:"true"` + Category *DimensionField `type:"structure"` // The ID for a computation. // @@ -91363,9 +91784,6 @@ func (s UniqueValuesComputation) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *UniqueValuesComputation) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UniqueValuesComputation"} - if s.Category == nil { - invalidParams.Add(request.NewErrParamRequired("Category")) - } if s.ComputationId == nil { invalidParams.Add(request.NewErrParamRequired("ComputationId")) } @@ -103279,6 +103697,22 @@ func SelectedTooltipType_Values() []string { } } +const ( + // SharingModelAccount is a SharingModel enum value + SharingModelAccount = "ACCOUNT" + + // SharingModelNamespace is a SharingModel enum value + SharingModelNamespace = "NAMESPACE" +) + +// SharingModel_Values returns all elements of the SharingModel enum +func SharingModel_Values() []string { + return []string{ + SharingModelAccount, + SharingModelNamespace, + } +} + const ( // SheetContentTypePaginated is a SheetContentType enum value SheetContentTypePaginated = "PAGINATED" @@ -103445,6 +103879,9 @@ const ( // SnapshotFileFormatTypePdf is a SnapshotFileFormatType enum value SnapshotFileFormatTypePdf = "PDF" + + // SnapshotFileFormatTypeExcel is a SnapshotFileFormatType enum value + SnapshotFileFormatTypeExcel = "EXCEL" ) // SnapshotFileFormatType_Values returns all elements of the SnapshotFileFormatType enum @@ -103452,6 +103889,7 @@ func SnapshotFileFormatType_Values() []string { return []string{ SnapshotFileFormatTypeCsv, SnapshotFileFormatTypePdf, + SnapshotFileFormatTypeExcel, } } diff --git a/service/quicksight/quicksightiface/interface.go b/service/quicksight/quicksightiface/interface.go index 803b0f5e370..7666b784593 100644 --- a/service/quicksight/quicksightiface/interface.go +++ b/service/quicksight/quicksightiface/interface.go @@ -320,10 +320,16 @@ type QuickSightAPI interface { DescribeFolderPermissionsWithContext(aws.Context, *quicksight.DescribeFolderPermissionsInput, ...request.Option) (*quicksight.DescribeFolderPermissionsOutput, error) DescribeFolderPermissionsRequest(*quicksight.DescribeFolderPermissionsInput) (*request.Request, 
*quicksight.DescribeFolderPermissionsOutput) + DescribeFolderPermissionsPages(*quicksight.DescribeFolderPermissionsInput, func(*quicksight.DescribeFolderPermissionsOutput, bool) bool) error + DescribeFolderPermissionsPagesWithContext(aws.Context, *quicksight.DescribeFolderPermissionsInput, func(*quicksight.DescribeFolderPermissionsOutput, bool) bool, ...request.Option) error + DescribeFolderResolvedPermissions(*quicksight.DescribeFolderResolvedPermissionsInput) (*quicksight.DescribeFolderResolvedPermissionsOutput, error) DescribeFolderResolvedPermissionsWithContext(aws.Context, *quicksight.DescribeFolderResolvedPermissionsInput, ...request.Option) (*quicksight.DescribeFolderResolvedPermissionsOutput, error) DescribeFolderResolvedPermissionsRequest(*quicksight.DescribeFolderResolvedPermissionsInput) (*request.Request, *quicksight.DescribeFolderResolvedPermissionsOutput) + DescribeFolderResolvedPermissionsPages(*quicksight.DescribeFolderResolvedPermissionsInput, func(*quicksight.DescribeFolderResolvedPermissionsOutput, bool) bool) error + DescribeFolderResolvedPermissionsPagesWithContext(aws.Context, *quicksight.DescribeFolderResolvedPermissionsInput, func(*quicksight.DescribeFolderResolvedPermissionsOutput, bool) bool, ...request.Option) error + DescribeGroup(*quicksight.DescribeGroupInput) (*quicksight.DescribeGroupOutput, error) DescribeGroupWithContext(aws.Context, *quicksight.DescribeGroupInput, ...request.Option) (*quicksight.DescribeGroupOutput, error) DescribeGroupRequest(*quicksight.DescribeGroupInput) (*request.Request, *quicksight.DescribeGroupOutput) @@ -473,10 +479,16 @@ type QuickSightAPI interface { ListFolderMembersWithContext(aws.Context, *quicksight.ListFolderMembersInput, ...request.Option) (*quicksight.ListFolderMembersOutput, error) ListFolderMembersRequest(*quicksight.ListFolderMembersInput) (*request.Request, *quicksight.ListFolderMembersOutput) + ListFolderMembersPages(*quicksight.ListFolderMembersInput, 
func(*quicksight.ListFolderMembersOutput, bool) bool) error + ListFolderMembersPagesWithContext(aws.Context, *quicksight.ListFolderMembersInput, func(*quicksight.ListFolderMembersOutput, bool) bool, ...request.Option) error + ListFolders(*quicksight.ListFoldersInput) (*quicksight.ListFoldersOutput, error) ListFoldersWithContext(aws.Context, *quicksight.ListFoldersInput, ...request.Option) (*quicksight.ListFoldersOutput, error) ListFoldersRequest(*quicksight.ListFoldersInput) (*request.Request, *quicksight.ListFoldersOutput) + ListFoldersPages(*quicksight.ListFoldersInput, func(*quicksight.ListFoldersOutput, bool) bool) error + ListFoldersPagesWithContext(aws.Context, *quicksight.ListFoldersInput, func(*quicksight.ListFoldersOutput, bool) bool, ...request.Option) error + ListGroupMemberships(*quicksight.ListGroupMembershipsInput) (*quicksight.ListGroupMembershipsOutput, error) ListGroupMembershipsWithContext(aws.Context, *quicksight.ListGroupMembershipsInput, ...request.Option) (*quicksight.ListGroupMembershipsOutput, error) ListGroupMembershipsRequest(*quicksight.ListGroupMembershipsInput) (*request.Request, *quicksight.ListGroupMembershipsOutput) @@ -642,6 +654,9 @@ type QuickSightAPI interface { SearchFoldersWithContext(aws.Context, *quicksight.SearchFoldersInput, ...request.Option) (*quicksight.SearchFoldersOutput, error) SearchFoldersRequest(*quicksight.SearchFoldersInput) (*request.Request, *quicksight.SearchFoldersOutput) + SearchFoldersPages(*quicksight.SearchFoldersInput, func(*quicksight.SearchFoldersOutput, bool) bool) error + SearchFoldersPagesWithContext(aws.Context, *quicksight.SearchFoldersInput, func(*quicksight.SearchFoldersOutput, bool) bool, ...request.Option) error + SearchGroups(*quicksight.SearchGroupsInput) (*quicksight.SearchGroupsOutput, error) SearchGroupsWithContext(aws.Context, *quicksight.SearchGroupsInput, ...request.Option) (*quicksight.SearchGroupsOutput, error) SearchGroupsRequest(*quicksight.SearchGroupsInput) (*request.Request, 
*quicksight.SearchGroupsOutput) diff --git a/service/rds/api.go b/service/rds/api.go index 42fc94c4b05..1e39cad203e 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -5270,6 +5270,12 @@ func (c *RDS) DescribeDBClusterAutomatedBackupsRequest(input *DescribeDBClusterA Name: opDescribeDBClusterAutomatedBackups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -5323,6 +5329,57 @@ func (c *RDS) DescribeDBClusterAutomatedBackupsWithContext(ctx aws.Context, inpu return out, req.Send() } +// DescribeDBClusterAutomatedBackupsPages iterates over the pages of a DescribeDBClusterAutomatedBackups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterAutomatedBackups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterAutomatedBackups operation. +// pageNum := 0 +// err := client.DescribeDBClusterAutomatedBackupsPages(params, +// func(page *rds.DescribeDBClusterAutomatedBackupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *RDS) DescribeDBClusterAutomatedBackupsPages(input *DescribeDBClusterAutomatedBackupsInput, fn func(*DescribeDBClusterAutomatedBackupsOutput, bool) bool) error { + return c.DescribeDBClusterAutomatedBackupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterAutomatedBackupsPagesWithContext same as DescribeDBClusterAutomatedBackupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBClusterAutomatedBackupsPagesWithContext(ctx aws.Context, input *DescribeDBClusterAutomatedBackupsInput, fn func(*DescribeDBClusterAutomatedBackupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterAutomatedBackupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterAutomatedBackupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterAutomatedBackupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterBacktracks = "DescribeDBClusterBacktracks" // DescribeDBClusterBacktracksRequest generates a "aws/request.Request" representing the @@ -48376,7 +48433,7 @@ type RestoreDBClusterFromS3Input struct { // // Aurora MySQL // - // Examples: 5.7.mysql_aurora.2.07.1, 8.0.mysql_aurora.3.02.0 + // Examples: 5.7.mysql_aurora.2.12.0, 8.0.mysql_aurora.3.04.0 EngineVersion *string `type:"string"` // The Amazon Web Services KMS key identifier for an encrypted DB cluster. @@ -48549,9 +48606,9 @@ type RestoreDBClusterFromS3Input struct { // The version of the database that the backup files were created from. // - // MySQL versions 5.5, 5.6, and 5.7 are supported. + // MySQL versions 5.7 and 8.0 are supported. 
// - // Example: 5.6.40, 5.7.28 + // Example: 5.7.40, 8.0.28 // // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` diff --git a/service/rds/rdsiface/interface.go b/service/rds/rdsiface/interface.go index cb229a4670a..0c7b81d2764 100644 --- a/service/rds/rdsiface/interface.go +++ b/service/rds/rdsiface/interface.go @@ -278,6 +278,9 @@ type RDSAPI interface { DescribeDBClusterAutomatedBackupsWithContext(aws.Context, *rds.DescribeDBClusterAutomatedBackupsInput, ...request.Option) (*rds.DescribeDBClusterAutomatedBackupsOutput, error) DescribeDBClusterAutomatedBackupsRequest(*rds.DescribeDBClusterAutomatedBackupsInput) (*request.Request, *rds.DescribeDBClusterAutomatedBackupsOutput) + DescribeDBClusterAutomatedBackupsPages(*rds.DescribeDBClusterAutomatedBackupsInput, func(*rds.DescribeDBClusterAutomatedBackupsOutput, bool) bool) error + DescribeDBClusterAutomatedBackupsPagesWithContext(aws.Context, *rds.DescribeDBClusterAutomatedBackupsInput, func(*rds.DescribeDBClusterAutomatedBackupsOutput, bool) bool, ...request.Option) error + DescribeDBClusterBacktracks(*rds.DescribeDBClusterBacktracksInput) (*rds.DescribeDBClusterBacktracksOutput, error) DescribeDBClusterBacktracksWithContext(aws.Context, *rds.DescribeDBClusterBacktracksInput, ...request.Option) (*rds.DescribeDBClusterBacktracksOutput, error) DescribeDBClusterBacktracksRequest(*rds.DescribeDBClusterBacktracksInput) (*request.Request, *rds.DescribeDBClusterBacktracksOutput) diff --git a/service/s3/examples_test.go b/service/s3/examples_test.go index 7c8236db113..239ff70cbea 100644 --- a/service/s3/examples_test.go +++ b/service/s3/examples_test.go @@ -125,16 +125,12 @@ func ExampleS3_CopyObject_shared00() { fmt.Println(result) } -// To create a bucket in a specific region -// The following example creates a bucket. The request specifies an AWS region where -// to create the bucket. +// To create a bucket +// The following example creates a bucket. 
func ExampleS3_CreateBucket_shared00() { svc := s3.New(session.New()) input := &s3.CreateBucketInput{ Bucket: aws.String("examplebucket"), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{ - LocationConstraint: aws.String("eu-west-1"), - }, } result, err := svc.CreateBucket(input) @@ -159,12 +155,16 @@ func ExampleS3_CreateBucket_shared00() { fmt.Println(result) } -// To create a bucket -// The following example creates a bucket. +// To create a bucket in a specific region +// The following example creates a bucket. The request specifies an AWS region where +// to create the bucket. func ExampleS3_CreateBucket_shared01() { svc := s3.New(session.New()) input := &s3.CreateBucketInput{ Bucket: aws.String("examplebucket"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + LocationConstraint: aws.String("eu-west-1"), + }, } result, err := svc.CreateBucket(input) @@ -398,13 +398,13 @@ func ExampleS3_DeleteBucketWebsite_shared00() { fmt.Println(result) } -// To delete an object (from a non-versioned bucket) -// The following example deletes an object from a non-versioned bucket. +// To delete an object +// The following example deletes an object from an S3 bucket. func ExampleS3_DeleteObject_shared00() { svc := s3.New(session.New()) input := &s3.DeleteObjectInput{ - Bucket: aws.String("ExampleBucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("objectkey.jpg"), } result, err := svc.DeleteObject(input) @@ -425,13 +425,13 @@ func ExampleS3_DeleteObject_shared00() { fmt.Println(result) } -// To delete an object -// The following example deletes an object from an S3 bucket. +// To delete an object (from a non-versioned bucket) +// The following example deletes an object from a non-versioned bucket. 
func ExampleS3_DeleteObject_shared01() { svc := s3.New(session.New()) input := &s3.DeleteObjectInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey.jpg"), + Bucket: aws.String("ExampleBucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.DeleteObject(input) @@ -452,15 +452,15 @@ func ExampleS3_DeleteObject_shared01() { fmt.Println(result) } -// To remove tag set from an object -// The following example removes tag set associated with the specified object. If the -// bucket is versioning enabled, the operation removes tag set from the latest object -// version. +// To remove tag set from an object version +// The following example removes tag set associated with the specified object version. +// The request specifies both the object key and object version. func ExampleS3_DeleteObjectTagging_shared00() { svc := s3.New(session.New()) input := &s3.DeleteObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), } result, err := svc.DeleteObjectTagging(input) @@ -481,15 +481,15 @@ func ExampleS3_DeleteObjectTagging_shared00() { fmt.Println(result) } -// To remove tag set from an object version -// The following example removes tag set associated with the specified object version. -// The request specifies both the object key and object version. +// To remove tag set from an object +// The following example removes tag set associated with the specified object. If the +// bucket is versioning enabled, the operation removes tag set from the latest object +// version. 
func ExampleS3_DeleteObjectTagging_shared01() { svc := s3.New(session.New()) input := &s3.DeleteObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.DeleteObjectTagging(input) @@ -903,15 +903,13 @@ func ExampleS3_GetBucketWebsite_shared00() { fmt.Println(result) } -// To retrieve a byte range of an object -// The following example retrieves an object for an S3 bucket. The request specifies -// the range header to retrieve a specific byte range. +// To retrieve an object +// The following example retrieves an object for an S3 bucket. func ExampleS3_GetObject_shared00() { svc := s3.New(session.New()) input := &s3.GetObjectInput{ Bucket: aws.String("examplebucket"), - Key: aws.String("SampleFile.txt"), - Range: aws.String("bytes=0-9"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.GetObject(input) @@ -936,13 +934,15 @@ func ExampleS3_GetObject_shared00() { fmt.Println(result) } -// To retrieve an object -// The following example retrieves an object for an S3 bucket. +// To retrieve a byte range of an object +// The following example retrieves an object for an S3 bucket. The request specifies +// the range header to retrieve a specific byte range. func ExampleS3_GetObject_shared01() { svc := s3.New(session.New()) input := &s3.GetObjectInput{ Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Key: aws.String("SampleFile.txt"), + Range: aws.String("bytes=0-9"), } result, err := svc.GetObject(input) @@ -1158,12 +1158,16 @@ func ExampleS3_ListBuckets_shared00() { fmt.Println(result) } -// To list in-progress multipart uploads on a bucket -// The following example lists in-progress multipart uploads on a specific bucket. 
+// List next set of multipart uploads when previous result is truncated +// The following example specifies the upload-id-marker and key-marker from previous +// truncated response to retrieve next setup of multipart uploads. func ExampleS3_ListMultipartUploads_shared00() { svc := s3.New(session.New()) input := &s3.ListMultipartUploadsInput{ - Bucket: aws.String("examplebucket"), + Bucket: aws.String("examplebucket"), + KeyMarker: aws.String("nextkeyfrompreviousresponse"), + MaxUploads: aws.Int64(2), + UploadIdMarker: aws.String("valuefrompreviousresponse"), } result, err := svc.ListMultipartUploads(input) @@ -1184,16 +1188,12 @@ func ExampleS3_ListMultipartUploads_shared00() { fmt.Println(result) } -// List next set of multipart uploads when previous result is truncated -// The following example specifies the upload-id-marker and key-marker from previous -// truncated response to retrieve next setup of multipart uploads. +// To list in-progress multipart uploads on a bucket +// The following example lists in-progress multipart uploads on a specific bucket. func ExampleS3_ListMultipartUploads_shared01() { svc := s3.New(session.New()) input := &s3.ListMultipartUploadsInput{ - Bucket: aws.String("examplebucket"), - KeyMarker: aws.String("nextkeyfrompreviousresponse"), - MaxUploads: aws.Int64(2), - UploadIdMarker: aws.String("valuefrompreviousresponse"), + Bucket: aws.String("examplebucket"), } result, err := svc.ListMultipartUploads(input) @@ -1748,16 +1748,17 @@ func ExampleS3_PutBucketWebsite_shared00() { fmt.Println(result) } -// To upload an object -// The following example uploads an object to a versioning-enabled bucket. The source -// file is specified using Windows file syntax. S3 returns VersionId of the newly created -// object. +// To upload an object (specify optional headers) +// The following example uploads an object. The request specifies optional request headers +// to directs S3 to use specific storage class and use server-side encryption. 
func ExampleS3_PutObject_shared00() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + ServerSideEncryption: aws.String("AES256"), + StorageClass: aws.String("STANDARD_IA"), } result, err := svc.PutObject(input) @@ -1778,19 +1779,16 @@ func ExampleS3_PutObject_shared00() { fmt.Println(result) } -// To upload object and specify user-defined metadata -// The following example creates an object. The request also specifies optional metadata. -// If the bucket is versioning enabled, S3 returns version ID in response. +// To upload an object and specify optional tags +// The following example uploads an object. The request specifies optional object tags. +// The bucket is versioned, therefore S3 returns version ID of the newly created object. func ExampleS3_PutObject_shared01() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - Metadata: map[string]*string{ - "metadata1": aws.String("value1"), - "metadata2": aws.String("value2"), - }, + Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1811,17 +1809,18 @@ func ExampleS3_PutObject_shared01() { fmt.Println(result) } -// To upload an object (specify optional headers) -// The following example uploads an object. The request specifies optional request headers -// to directs S3 to use specific storage class and use server-side encryption. 
+// To upload an object and specify server-side encryption and object tags +// The following example uploads an object. The request specifies the optional server-side +// encryption option. The request also specifies optional object tags. If the bucket +// is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared02() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Key: aws.String("exampleobject"), ServerSideEncryption: aws.String("AES256"), - StorageClass: aws.String("STANDARD_IA"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1871,18 +1870,16 @@ func ExampleS3_PutObject_shared03() { fmt.Println(result) } -// To upload an object and specify server-side encryption and object tags -// The following example uploads an object. The request specifies the optional server-side -// encryption option. The request also specifies optional object tags. If the bucket -// is versioning enabled, S3 returns version ID in response. +// To upload an object +// The following example uploads an object to a versioning-enabled bucket. The source +// file is specified using Windows file syntax. S3 returns VersionId of the newly created +// object. 
func ExampleS3_PutObject_shared04() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - ServerSideEncryption: aws.String("AES256"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.PutObject(input) @@ -1903,16 +1900,17 @@ func ExampleS3_PutObject_shared04() { fmt.Println(result) } -// To upload an object and specify optional tags -// The following example uploads an object. The request specifies optional object tags. -// The bucket is versioned, therefore S3 returns version ID of the newly created object. +// To upload an object and specify canned ACL. +// The following example uploads and object. The request specifies optional canned ACL +// (access control list) to all READ access to authenticated users. If the bucket is +// versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared05() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - Tagging: aws.String("key1=value1&key2=value2"), + ACL: aws.String("authenticated-read"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), } result, err := svc.PutObject(input) @@ -1933,17 +1931,19 @@ func ExampleS3_PutObject_shared05() { fmt.Println(result) } -// To upload an object and specify canned ACL. -// The following example uploads and object. The request specifies optional canned ACL -// (access control list) to all READ access to authenticated users. If the bucket is -// versioning enabled, S3 returns version ID in response. 
+// To upload object and specify user-defined metadata +// The following example creates an object. The request also specifies optional metadata. +// If the bucket is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared06() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - ACL: aws.String("authenticated-read"), Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), Key: aws.String("exampleobject"), + Metadata: map[string]*string{ + "metadata1": aws.String("value1"), + "metadata2": aws.String("value2"), + }, } result, err := svc.PutObject(input) diff --git a/service/verifiedpermissions/api.go b/service/verifiedpermissions/api.go index d2bb80190cd..369dcf1a3ed 100644 --- a/service/verifiedpermissions/api.go +++ b/service/verifiedpermissions/api.go @@ -85,6 +85,11 @@ func (c *VerifiedPermissions) CreateIdentitySourceRequest(input *CreateIdentityS // parameter for this operation. The CognitoUserPoolId and CognitoClientId are // defined by the Amazon Cognito user pool. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -237,6 +242,11 @@ func (c *VerifiedPermissions) CreatePolicyRequest(input *CreatePolicyInput) (req // store. If the policy doesn't pass validation, the operation fails and the // policy isn't stored. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . 
It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -378,6 +388,11 @@ func (c *VerifiedPermissions) CreatePolicyStoreRequest(input *CreatePolicyStoreI // Although Cedar supports multiple namespaces (https://docs.cedarpolicy.com/schema.html#namespace), // Verified Permissions currently supports only one namespace per policy store. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -519,6 +534,11 @@ func (c *VerifiedPermissions) CreatePolicyTemplateRequest(input *CreatePolicyTem // is dynamically linked to the template. If the template changes, then any // policies that are linked to that template are immediately updated as well. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2972,6 +2992,11 @@ func (c *VerifiedPermissions) PutSchemaRequest(input *PutSchemaInput) (req *requ // re-evaluated against the changed schema. 
If you later update a policy, then // it is evaluated against the new schema at that time. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3116,6 +3141,11 @@ func (c *VerifiedPermissions) UpdateIdentitySourceRequest(input *UpdateIdentityS // source, or to change the mapping of identities from the IdP to a different // principal entity type. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3273,6 +3303,11 @@ func (c *VerifiedPermissions) UpdatePolicyRequest(input *UpdatePolicyInput) (req // // - To update a template-linked policy, you must update the template instead. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -3415,6 +3450,11 @@ func (c *VerifiedPermissions) UpdatePolicyStoreRequest(input *UpdatePolicyStoreI // // Modifies the validation setting for a policy store. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3559,6 +3599,11 @@ func (c *VerifiedPermissions) UpdatePolicyTemplateRequest(input *UpdatePolicyTem // in authorization decisions that involve all template-linked policies instantiated // from this template. // +// Verified Permissions is eventually consistent (https://wikipedia.org/wiki/Eventual_consistency) +// . It can take a few seconds for a new or changed element to be propagate +// through the service and be visible in the results of other Verified Permissions +// operations. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5443,9 +5488,9 @@ func (s *EntityIdentifier) SetEntityType(v string) *EntityIdentifier { // This data type is used as one of the fields in the EntitiesDefinition (https://docs.aws.amazon.com/verifiedpermissions/latest/apireference/API_EntitiesDefinition.html) // structure. // -// { "id": { "entityType": "Photo", "entityId": "VacationPhoto94.jpg" }, "Attributes": -// {}, "Parents": [ { "entityType": "Album", "entityId": "alice_folder" } ] -// } +// { "identifier": { "entityType": "Photo", "entityId": "VacationPhoto94.jpg" +// }, "attributes": {}, "parents": [ { "entityType": "Album", "entityId": "alice_folder" +// } ] } type EntityItem struct { _ struct{} `type:"structure"`