From ef3dd3060d501dd02140a9104651ca92be8159d8 Mon Sep 17 00:00:00 2001
From: Chad Greenburg
Date: Mon, 10 Feb 2020 15:35:22 -0600
Subject: [PATCH 001/304] Updated lambda_layer_version resource

Added a "retain" value to the resource schema. If retain is true, don't
delete the old lambda layer.
---
 aws/resource_aws_lambda_layer_version.go | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/aws/resource_aws_lambda_layer_version.go b/aws/resource_aws_lambda_layer_version.go
index 92d564f38f29..d5166ff9bd72 100644
--- a/aws/resource_aws_lambda_layer_version.go
+++ b/aws/resource_aws_lambda_layer_version.go
@@ -105,6 +105,11 @@ func resourceAwsLambdaLayerVersion() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+			"retain": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
 		},
 	}
 }
@@ -225,6 +230,12 @@ func resourceAwsLambdaLayerVersionRead(d *schema.ResourceData, meta interface{})
 }

 func resourceAwsLambdaLayerVersionDelete(d *schema.ResourceData, meta interface{}) error {
+	retain := d.Get("retain").(bool)
+	if retain {
+		log.Printf("[DEBUG] Retaining Lambda Layer %q", d.Get("arn").(string))
+		return nil
+	}
+
 	conn := meta.(*AWSClient).lambdaconn

 	version, err := strconv.ParseInt(d.Get("version").(string), 10, 64)
@@ -237,7 +248,7 @@ func resourceAwsLambdaLayerVersionDelete(d *schema.ResourceData, meta interface{
 		VersionNumber: aws.Int64(version),
 	})
 	if err != nil {
-		return fmt.Errorf("error deleting Lambda Layer Version (%s): %s", d.Id(), err)
+		return fmt.Errorf("Error deleting Lambda Layer Version (%s): %s", d.Id(), err)
 	}

 	log.Printf("[DEBUG] Lambda layer %q deleted", d.Get("arn").(string))

From b7f3bd2454b26c33d2dcef8914b3dbc96dad3310 Mon Sep 17 00:00:00 2001
From: Chad Greenburg
Date: Mon, 10 Feb 2020 17:03:53 -0600
Subject: [PATCH 002/304] Updated lambda_layer_version docs with new retain
 parameter

---
 website/docs/r/lambda_layer_version.html.markdown | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/docs/r/lambda_layer_version.html.markdown b/website/docs/r/lambda_layer_version.html.markdown
index e6840db13af9..79ad5dbaef9d 100644
--- a/website/docs/r/lambda_layer_version.html.markdown
+++ b/website/docs/r/lambda_layer_version.html.markdown
@@ -46,6 +46,7 @@ large files efficiently.
 * `description` - (Optional) Description of what your Lambda Layer does.
 * `license_info` - (Optional) License info for your Lambda Layer. See [License Info][3].
 * `source_code_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `${filebase64sha256("file.zip")}` (Terraform 0.11.12 or later) or `${base64sha256(file("file.zip"))}` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive.
+* `retain` - (Optional) Whether to retain old versions of a previously deployed Lambda Layer instead of deleting them. Defaults to `false`.
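+
+For example, to keep superseded layer versions when a new one is published (a minimal sketch; the filename and layer name are illustrative):
+
+```hcl
+resource "aws_lambda_layer_version" "example" {
+  filename   = "file.zip"
+  layer_name = "example"
+
+  # old versions are kept when this resource is replaced or destroyed
+  retain = true
+}
+```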
## Attributes Reference From 4492c5ad31a3a4cd8a042855c36c977f75dca1f2 Mon Sep 17 00:00:00 2001 From: Anatoliy Volynskiy Date: Thu, 6 Feb 2020 20:28:16 +0200 Subject: [PATCH 003/304] added lambda_layer_permission file --- aws/provider.go | 1 + ...rce_aws_lambda_layer_version_permission.go | 180 ++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100644 aws/resource_aws_lambda_layer_version_permission.go diff --git a/aws/provider.go b/aws/provider.go index d07e1c7deca5..18c7ef221766 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -775,6 +775,7 @@ func Provider() *schema.Provider { "aws_lambda_function_event_invoke_config": resourceAwsLambdaFunctionEventInvokeConfig(), "aws_lambda_function": resourceAwsLambdaFunction(), "aws_lambda_layer_version": resourceAwsLambdaLayerVersion(), + "aws_lambda_layer_version_permission": resourceAwsLambdaLayerVersionPermission(), "aws_lambda_permission": resourceAwsLambdaPermission(), "aws_lambda_provisioned_concurrency_config": resourceAwsLambdaProvisionedConcurrencyConfig(), "aws_launch_configuration": resourceAwsLaunchConfiguration(), diff --git a/aws/resource_aws_lambda_layer_version_permission.go b/aws/resource_aws_lambda_layer_version_permission.go new file mode 100644 index 000000000000..ee9e0a037126 --- /dev/null +++ b/aws/resource_aws_lambda_layer_version_permission.go @@ -0,0 +1,180 @@ +package aws + +import ( + // "errors" + "fmt" + "log" + "strconv" + // "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsLambdaLayerVersionPermission() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLambdaLayerVersionPermissionAdd, + Read: resourceAwsLambdaLayerVersionPermissionGet, + Delete: resourceAwsLambdaLayerVersionPermissionRemove, + + // Importer: &schema.ResourceImporter{ + // State: schema.ImportStatePassthrough, + // }, + + Schema: map[string]*schema.Schema{ + "layer_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "layer_version": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "statement_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "action": { + Type: schema.TypeString, + Optional: true, // add default value lambda:GetLayerVersion ?? 
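+				// lambda:GetLayerVersion is the action suggested by the AWS docs
+				// for layer version policies, so it is the natural default candidate here.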
+ ForceNew: true, + }, + "principal": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "organization_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + layerArn := aws.String(d.Get("layer_arn").(string)) + layerVersion := aws.Int64(int64(d.Get("layer_version").(int))) + statementId := aws.String(d.Get("statement_id").(string)) + principal := aws.String(d.Get("principal").(string)) + organizationId, hasOrganizationId := d.GetOk("organization_id") + action := aws.String("lambda:GetLayerVersion") + if d.Get("action") != nil { + action = aws.String(d.Get("action").(string)) + } + + params := &lambda.AddLayerVersionPermissionInput{ + LayerName: layerArn, + VersionNumber: layerVersion, + Action: action, + Principal: principal, + StatementId: statementId, + } + + if hasOrganizationId { + params.OrganizationId = aws.String(organizationId.(string)) + } + + // if v, ok := d.GetOk("compatible_runtimes"); ok && v.(*schema.Set).Len() > 0 { + // params.CompatibleRuntimes = expandStringList(v.(*schema.Set).List()) + // } + + log.Printf("[DEBUG] Adding Lambda layer permissions: %s", params) + result, err := conn.AddLayerVersionPermission(params) + if err != nil { + return fmt.Errorf("Error adding lambda layer permissions: %s", err) + } + + log.Printf(aws.StringValue(result.Statement)) + + d.SetId(*layerArn + ":" + strconv.FormatInt(*layerVersion, 10)) + + return resourceAwsLambdaLayerVersionPermissionGet(d, meta) +} + +func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + layerName, version, err := resourceAwsLambdaLayerVersionParseId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing lambda layer ID: %s", err) + } + + layerVersionPolicyOutput, err := conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{ + LayerName: aws.String(layerName), + VersionNumber: aws.Int64(version), + }) + + log.Printf("[DEBUG] OUTPUT: %s", layerVersionPolicyOutput) + // if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { + // log.Printf("[WARN] Lambda Layer Version (%s) not found, removing from state", d.Id()) + // d.SetId("") + // return nil + // } + + // if err != nil { + // return fmt.Errorf("error reading Lambda Layer version (%s): %s", d.Id(), err) + // } + + // if err := d.Set("layer_name", layerName); err != nil { + // return fmt.Errorf("Error setting lambda layer name: %s", err) + // } + // if err := d.Set("version", strconv.FormatInt(version, 10)); err != nil { + // return fmt.Errorf("Error setting lambda layer version: %s", err) + // } + // if err := d.Set("arn", layerVersion.LayerVersionArn); err != nil { + // return fmt.Errorf("Error setting lambda layer version arn: %s", err) + // } + // if err := d.Set("layer_arn", layerVersion.LayerArn); err != nil { + // return fmt.Errorf("Error setting lambda layer arn: %s", err) + // } + // if err := d.Set("description", layerVersion.Description); err != nil { + // return fmt.Errorf("Error setting lambda layer description: %s", err) + // } + // if err := d.Set("license_info", layerVersion.LicenseInfo); err != nil { + // return fmt.Errorf("Error setting lambda layer license info: %s", err) + // } + // if err := d.Set("created_date", layerVersion.CreatedDate); err != nil { + // return fmt.Errorf("Error setting lambda layer created date: %s", err) + // } + // if 
err := d.Set("source_code_hash", layerVersion.Content.CodeSha256); err != nil { + // return fmt.Errorf("Error setting lambda layer source code hash: %s", err) + // } + // if err := d.Set("source_code_size", layerVersion.Content.CodeSize); err != nil { + // return fmt.Errorf("Error setting lambda layer source code size: %s", err) + // } + // if err := d.Set("compatible_runtimes", flattenStringList(layerVersion.CompatibleRuntimes)); err != nil { + // return fmt.Errorf("Error setting lambda layer compatible runtimes: %s", err) + // } + + return nil +} + +func resourceAwsLambdaLayerVersionPermissionRemove(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + layerName, version, err := resourceAwsLambdaLayerVersionParseId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing lambda layer ID: %s", err) + } + + _, err = conn.RemoveLayerVersionPermission(&lambda.RemoveLayerVersionPermissionInput{ + LayerName: aws.String(layerName), + VersionNumber: aws.Int64(int64(version)), + StatementId: aws.String(d.Get("statement_id").(string)), + }) + if err != nil { + return fmt.Errorf("error deleting Lambda Layer Version permission (%s): %s", d.Id(), err) + } + + log.Printf("[DEBUG] Lambda layer permission %q deleted", d.Get("statement_id").(string)) + return nil +} From fbebdfe3bbe8f4709bf62c458e898492bdbfba09 Mon Sep 17 00:00:00 2001 From: Anatoliy Volynskiy Date: Sun, 9 Feb 2020 00:39:00 +0200 Subject: [PATCH 004/304] add tests, update lambda_layer_version_permission --- ...rce_aws_lambda_layer_version_permission.go | 87 +++++------- ...ws_lambda_layer_version_permission_test.go | 132 ++++++++++++++++++ 2 files changed, 165 insertions(+), 54 deletions(-) create mode 100644 aws/resource_aws_lambda_layer_version_permission_test.go diff --git a/aws/resource_aws_lambda_layer_version_permission.go b/aws/resource_aws_lambda_layer_version_permission.go index ee9e0a037126..b609d821ee59 100644 --- a/aws/resource_aws_lambda_layer_version_permission.go +++ b/aws/resource_aws_lambda_layer_version_permission.go @@ -5,9 +5,10 @@ import ( "fmt" "log" "strconv" - // "strings" + "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lambda" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" @@ -41,7 +42,7 @@ func resourceAwsLambdaLayerVersionPermission() *schema.Resource { }, "action": { Type: schema.TypeString, - Optional: true, // add default value lambda:GetLayerVersion ?? 
+ Required: true, ForceNew: true, }, "principal": { @@ -54,6 +55,10 @@ func resourceAwsLambdaLayerVersionPermission() *schema.Resource { Optional: true, ForceNew: true, }, + "revision_id": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -66,10 +71,7 @@ func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta int statementId := aws.String(d.Get("statement_id").(string)) principal := aws.String(d.Get("principal").(string)) organizationId, hasOrganizationId := d.GetOk("organization_id") - action := aws.String("lambda:GetLayerVersion") - if d.Get("action") != nil { - action = aws.String(d.Get("action").(string)) - } + action := aws.String(d.Get("action").(string)) params := &lambda.AddLayerVersionPermissionInput{ LayerName: layerArn, @@ -83,19 +85,16 @@ func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta int params.OrganizationId = aws.String(organizationId.(string)) } - // if v, ok := d.GetOk("compatible_runtimes"); ok && v.(*schema.Set).Len() > 0 { - // params.CompatibleRuntimes = expandStringList(v.(*schema.Set).List()) - // } - log.Printf("[DEBUG] Adding Lambda layer permissions: %s", params) result, err := conn.AddLayerVersionPermission(params) if err != nil { return fmt.Errorf("Error adding lambda layer permissions: %s", err) } - log.Printf(aws.StringValue(result.Statement)) + // log.Printf(aws.StringValue(result.Statement)) d.SetId(*layerArn + ":" + strconv.FormatInt(*layerVersion, 10)) + d.Set("revision_id", result.RevisionId) return resourceAwsLambdaLayerVersionPermissionGet(d, meta) } @@ -103,7 +102,7 @@ func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta int func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lambdaconn - layerName, version, err := resourceAwsLambdaLayerVersionParseId(d.Id()) + layerName, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) if err != nil { return fmt.Errorf("Error parsing lambda layer ID: %s", err) } @@ -114,46 +113,10 @@ func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta int }) log.Printf("[DEBUG] OUTPUT: %s", layerVersionPolicyOutput) - // if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { - // log.Printf("[WARN] Lambda Layer Version (%s) not found, removing from state", d.Id()) - // d.SetId("") - // return nil - // } - - // if err != nil { - // return fmt.Errorf("error reading Lambda Layer version (%s): %s", d.Id(), err) - // } - - // if err := d.Set("layer_name", layerName); err != nil { - // return fmt.Errorf("Error setting lambda layer name: %s", err) - // } - // if err := d.Set("version", strconv.FormatInt(version, 10)); err != nil { - // return fmt.Errorf("Error setting lambda layer version: %s", err) - // } - // if err := d.Set("arn", layerVersion.LayerVersionArn); err != nil { - // return fmt.Errorf("Error setting lambda layer version arn: %s", err) - // } - // if err := d.Set("layer_arn", layerVersion.LayerArn); err != nil { - // return fmt.Errorf("Error setting lambda layer arn: %s", err) - // } - // if err := d.Set("description", layerVersion.Description); err != nil { - // return fmt.Errorf("Error setting lambda layer description: %s", err) - // } - // if err := d.Set("license_info", layerVersion.LicenseInfo); err != nil { - // return fmt.Errorf("Error setting lambda layer license info: %s", err) - // } - // if err := d.Set("created_date", layerVersion.CreatedDate); err != nil { - // return fmt.Errorf("Error setting lambda 
layer created date: %s", err) - // } - // if err := d.Set("source_code_hash", layerVersion.Content.CodeSha256); err != nil { - // return fmt.Errorf("Error setting lambda layer source code hash: %s", err) - // } - // if err := d.Set("source_code_size", layerVersion.Content.CodeSize); err != nil { - // return fmt.Errorf("Error setting lambda layer source code size: %s", err) - // } - // if err := d.Set("compatible_runtimes", flattenStringList(layerVersion.CompatibleRuntimes)); err != nil { - // return fmt.Errorf("Error setting lambda layer compatible runtimes: %s", err) - // } + + if err != nil { + return fmt.Errorf("error reading Lambda Layer version permission (%s): %s", d.Id(), err) + } return nil } @@ -161,14 +124,14 @@ func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta int func resourceAwsLambdaLayerVersionPermissionRemove(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lambdaconn - layerName, version, err := resourceAwsLambdaLayerVersionParseId(d.Id()) + layerName, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) if err != nil { return fmt.Errorf("Error parsing lambda layer ID: %s", err) } _, err = conn.RemoveLayerVersionPermission(&lambda.RemoveLayerVersionPermissionInput{ LayerName: aws.String(layerName), - VersionNumber: aws.Int64(int64(version)), + VersionNumber: aws.Int64(version), StatementId: aws.String(d.Get("statement_id").(string)), }) if err != nil { @@ -178,3 +141,19 @@ func resourceAwsLambdaLayerVersionPermissionRemove(d *schema.ResourceData, meta log.Printf("[DEBUG] Lambda layer permission %q deleted", d.Get("statement_id").(string)) return nil } + +func resourceAwsLambdaLayerVersionPermissionParseId(id string) (layerName string, version int64, err error) { + arn, err := arn.Parse(id) + if err != nil { + return + } + parts := strings.Split(arn.Resource, ":") + if len(parts) != 3 || parts[0] != "layer" { + err = fmt.Errorf("lambda_layer ID must be a valid Layer ARN") + return + } + + layerName = parts[1] + version, err = strconv.ParseInt(parts[2], 10, 64) + return +} diff --git a/aws/resource_aws_lambda_layer_version_permission_test.go b/aws/resource_aws_lambda_layer_version_permission_test.go new file mode 100644 index 000000000000..cbc61be4d76b --- /dev/null +++ b/aws/resource_aws_lambda_layer_version_permission_test.go @@ -0,0 +1,132 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +func init() { + resource.AddTestSweepers("aws_lambda_layer_version_permission", &resource.Sweeper{ + Name: "aws_lambda_layer_version_permission", + F: testSweepLambdaLayerVersionPermission, + }) +} + +func testSweepLambdaLayerVersionPermission(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + + lambdaconn := client.(*AWSClient).lambdaconn + resp, err := lambdaconn.ListLayers(&lambda.ListLayersInput{}) + if err != nil { + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Lambda Layer sweep for %s: %s", region, err) + return nil + } + return fmt.Errorf("Error retrieving Lambda layers: %s", err) + } + + if len(resp.Layers) == 0 { + log.Print("[DEBUG] No aws lambda layers to sweep") + return nil + } + + for _, l := range resp.Layers { + 
versionResp, err := lambdaconn.ListLayerVersions(&lambda.ListLayerVersionsInput{ + LayerName: l.LayerName, + }) + if err != nil { + return fmt.Errorf("Error retrieving versions for lambda layer: %s", err) + } + + for _, v := range versionResp.LayerVersions { + _, err := lambdaconn.DeleteLayerVersion(&lambda.DeleteLayerVersionInput{ + LayerName: l.LayerName, + VersionNumber: v.Version, + }) + if err != nil { + return err + } + } + } + + return nil +} + +func TestAccAWSLambdaLayerVersionPermission_basic(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" + layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaLayerVersionDestroy2, + Steps: []resource.TestStep{ + { + Config: testAccAWSLambdaLayerVersionPermission(layerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", layerName), + resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), + resource.TestCheckResourceAttr(resourceName, "principal", "*"), + resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), + ), + }, + }, + }) +} + +func testAccAWSLambdaLayerVersionPermission(layerName string) string { + return fmt.Sprintf(` +resource "aws_lambda_layer_version" "lambda_layer" { + filename = "test-fixtures/lambdatest.zip" + layer_name = "%s" +} + +resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { + layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn + layer_version = aws_lambda_layer_version.lambda_layer.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "*" +} +`, layerName) +} + +func testAccCheckLambdaLayerVersionDestroy2(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).lambdaconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_lambda_layer_version" { + continue + } + + layerName, version, err := resourceAwsLambdaLayerVersionParseId(rs.Primary.ID) + if err != nil { + return err + } + + _, err = conn.GetLayerVersion(&lambda.GetLayerVersionInput{ + LayerName: aws.String(layerName), + VersionNumber: aws.Int64(version), + }) + if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { + continue + } + if err != nil { + return err + } + + return fmt.Errorf("Lambda Layer Version (%s) still exists", rs.Primary.ID) + } + + return nil +} From 0032072b9dc3d0feda905c91e2d4f610f4f816fe Mon Sep 17 00:00:00 2001 From: Anatoliy Volynskiy Date: Tue, 11 Feb 2020 01:36:25 +0200 Subject: [PATCH 005/304] add import support --- ...rce_aws_lambda_layer_version_permission.go | 74 ++++++++++++++++--- 1 file changed, 63 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_lambda_layer_version_permission.go b/aws/resource_aws_lambda_layer_version_permission.go index b609d821ee59..7f4adea62243 100644 --- a/aws/resource_aws_lambda_layer_version_permission.go +++ b/aws/resource_aws_lambda_layer_version_permission.go @@ -1,9 +1,10 @@ package aws import ( - // "errors" + "encoding/json" "fmt" "log" + "reflect" "strconv" "strings" @@ -11,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lambda" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func 
resourceAwsLambdaLayerVersionPermission() *schema.Resource {
@@ -20,9 +20,9 @@ func resourceAwsLambdaLayerVersionPermission() *schema.Resource {
 		Read:   resourceAwsLambdaLayerVersionPermissionGet,
 		Delete: resourceAwsLambdaLayerVersionPermissionRemove,

-		// Importer: &schema.ResourceImporter{
-		// 	State: schema.ImportStatePassthrough,
-		// },
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},

 		Schema: map[string]*schema.Schema{
 			"layer_arn": {
@@ -59,6 +59,10 @@ func resourceAwsLambdaLayerVersionPermission() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+			"policy": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
 		},
 	}
 }
@@ -91,8 +95,6 @@ func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta int
 		return fmt.Errorf("Error adding lambda layer permissions: %s", err)
 	}

-	// log.Printf(aws.StringValue(result.Statement))
-
 	d.SetId(*layerArn + ":" + strconv.FormatInt(*layerVersion, 10))
 	d.Set("revision_id", result.RevisionId)

@@ -102,7 +104,7 @@ func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta int
 func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).lambdaconn

-	layerName, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id())
+	layerName, layerArn, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id())
 	if err != nil {
 		return fmt.Errorf("Error parsing lambda layer ID: %s", err)
 	}
@@ -112,19 +114,68 @@ func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta int
 		VersionNumber: aws.Int64(version),
 	})

-	log.Printf("[DEBUG] OUTPUT: %s", layerVersionPolicyOutput)
+	if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") {
+		log.Printf("[WARN] Lambda Layer Version (%s) not found, removing its permission from state", d.Id())
+		d.SetId("")
+		return nil
+	}

 	if err != nil {
 		return fmt.Errorf("error reading Lambda Layer version permission (%s): %s", d.Id(), err)
 	}

+	policyDoc := &IAMPolicyDoc{}
+
+	if err := json.Unmarshal([]byte(aws.StringValue(layerVersionPolicyOutput.Policy)), policyDoc); err != nil {
+		return err
+	}
+
+	principal := ""
+	identifiers := policyDoc.Statements[0].Principals[0].Identifiers
+	if reflect.TypeOf(identifiers).String() == "[]string" && identifiers.([]string)[0] == "*" {
+		principal = "*"
+	} else {
+		policy_principal_arn, err := arn.Parse(policyDoc.Statements[0].Principals[0].Identifiers.(string))
+		if err != nil {
+			return fmt.Errorf("error reading principal arn from Lambda Layer version permission (%s): %s", d.Id(), err)
+		}
+		principal = policy_principal_arn.AccountID
+	}
+
+	if err := d.Set("layer_arn", layerArn); err != nil {
+		return fmt.Errorf("Error setting lambda layer permission layer_arn: %s", err)
+	}
+	if err := d.Set("layer_version", version); err != nil {
+		return fmt.Errorf("Error setting lambda layer permission layer_version: %s", err)
+	}
+	if err := d.Set("statement_id", policyDoc.Statements[0].Sid); err != nil {
+		return fmt.Errorf("Error setting lambda layer permission statement_id: %s", err)
+	}
+	if err := d.Set("action", policyDoc.Statements[0].Actions); err != nil {
+		return fmt.Errorf("Error setting lambda layer permission action: %s", err)
+	}
+	if err := d.Set("principal", principal); err != nil {
+		return fmt.Errorf("Error setting lambda layer permission principal: %s", err)
+	}
+	if len(policyDoc.Statements[0].Conditions) > 0 {
+		if err := d.Set("organization_id", policyDoc.Statements[0].Conditions[0].Values.([]string)[0]);
err != nil { + return fmt.Errorf("Error setting lambda layer permission organization_id: %s", err) + } + } + if err := d.Set("policy", layerVersionPolicyOutput.Policy); err != nil { + return fmt.Errorf("Error setting lambda layer permission policy: %s", err) + } + if err := d.Set("revision_id", layerVersionPolicyOutput.RevisionId); err != nil { + return fmt.Errorf("Error setting lambda layer permission revision_id: %s", err) + } + return nil } func resourceAwsLambdaLayerVersionPermissionRemove(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lambdaconn - layerName, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) + layerName, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) if err != nil { return fmt.Errorf("Error parsing lambda layer ID: %s", err) } @@ -142,7 +193,7 @@ func resourceAwsLambdaLayerVersionPermissionRemove(d *schema.ResourceData, meta return nil } -func resourceAwsLambdaLayerVersionPermissionParseId(id string) (layerName string, version int64, err error) { +func resourceAwsLambdaLayerVersionPermissionParseId(id string) (layerName string, layerARN string, version int64, err error) { arn, err := arn.Parse(id) if err != nil { return @@ -154,6 +205,7 @@ func resourceAwsLambdaLayerVersionPermissionParseId(id string) (layerName string } layerName = parts[1] + layerARN = strings.TrimSuffix(id, ":"+parts[2]) version, err = strconv.ParseInt(parts[2], 10, 64) return } From 61f7dcc19da229626c7916725286c865f9ecfd64 Mon Sep 17 00:00:00 2001 From: Anatoliy Volynskiy Date: Wed, 12 Feb 2020 00:14:30 +0200 Subject: [PATCH 006/304] Update tests, add documentation. --- ...ws_lambda_layer_version_permission_test.go | 163 ++++++++++++++++-- ...bda_layer_version_permission.html.markdown | 58 +++++++ 2 files changed, 211 insertions(+), 10 deletions(-) create mode 100644 website/docs/r/lambda_layer_version_permission.html.markdown diff --git a/aws/resource_aws_lambda_layer_version_permission_test.go b/aws/resource_aws_lambda_layer_version_permission_test.go index cbc61be4d76b..618f07cf1f14 100644 --- a/aws/resource_aws_lambda_layer_version_permission_test.go +++ b/aws/resource_aws_lambda_layer_version_permission_test.go @@ -62,29 +62,114 @@ func testSweepLambdaLayerVersionPermission(region string) error { return nil } -func TestAccAWSLambdaLayerVersionPermission_basic(t *testing.T) { +func TestAccAWSLambdaLayerVersionPermission_all(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy2, + CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission(layerName), + Config: testAccAWSLambdaLayerVersionPermission_all(layerName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", layerName), + testAccCheckAwsLambdaLayerVersionPermissionExists("aws_lambda_layer_version_permission.lambda_layer_permission", layerName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), ), }, + + { + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSLambdaLayerVersionPermission_org(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" + layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLambdaLayerVersionPermission_org(layerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", layerName), + testAccCheckAwsLambdaLayerVersionPermissionExists("aws_lambda_layer_version_permission.lambda_layer_permission", layerName), + resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), + resource.TestCheckResourceAttr(resourceName, "principal", "*"), + resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), + resource.TestCheckResourceAttr(resourceName, "organization_id", "o-0123456789"), + ), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSLambdaLayerVersionPermission_account(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" + layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLambdaLayerVersionPermission_account(layerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", layerName), + testAccCheckAwsLambdaLayerVersionPermissionExists("aws_lambda_layer_version_permission.lambda_layer_permission", layerName), + resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), + resource.TestCheckResourceAttr(resourceName, "principal", "456789820214"), + resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), + ), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func testAccAWSLambdaLayerVersionPermission(layerName string) string { +// Creating Lambda layer and Lambda layer permissions + +func testAccAWSLambdaLayerVersionPermission_all(layerName string) string { + return fmt.Sprintf(` +resource "aws_lambda_layer_version" "lambda_layer" { + filename = "test-fixtures/lambdatest.zip" + layer_name = "%s" +} + +resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { + layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn + layer_version = aws_lambda_layer_version.lambda_layer.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "*" +} +`, layerName) +} + +func testAccAWSLambdaLayerVersionPermission_org(layerName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer" { filename = "test-fixtures/lambdatest.zip" @@ -97,36 +182,94 @@ resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { action = "lambda:GetLayerVersion" statement_id = "xaccount" principal = "*" + organization_id = "o-0123456789" +} +`, layerName) +} + +func 
testAccAWSLambdaLayerVersionPermission_account(layerName string) string {
+	return fmt.Sprintf(`
+resource "aws_lambda_layer_version" "lambda_layer" {
+  filename = "test-fixtures/lambdatest.zip"
+  layer_name = "%s"
+}
+
+resource "aws_lambda_layer_version_permission" "lambda_layer_permission" {
+  layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn
+  layer_version = aws_lambda_layer_version.lambda_layer.version
+  action = "lambda:GetLayerVersion"
+  statement_id = "xaccount"
+  principal = "456789820214"
+}
+`, layerName)
+}
+
+func testAccCheckAwsLambdaLayerVersionPermissionExists(res, layerName string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[res]
+		if !ok {
+			return fmt.Errorf("Lambda Layer version permission not found: %s", res)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("Lambda Layer version policy ID not set")
+		}
+
+		if rs.Primary.Attributes["revision_id"] == "" {
+			return fmt.Errorf("Lambda Layer Version Permission not set")
+		}
+
+		_, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(rs.Primary.Attributes["id"])
+		if err != nil {
+			return fmt.Errorf("Error parsing lambda layer ID: %s", err)
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).lambdaconn
+
+		_, err = conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{
+			LayerName:     aws.String(layerName),
+			VersionNumber: aws.Int64(version),
+		})
+
+		if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") {
+			return err
+		}
+
+		return err
+	}
+}
+
-func testAccCheckLambdaLayerVersionDestroy2(s *terraform.State) error {
+func testAccCheckLambdaLayerVersionPermissionDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).lambdaconn

 	for _, rs := range s.RootModule().Resources {
-		if rs.Type != "aws_lambda_layer_version" {
+		if rs.Type != "aws_lambda_layer_version_permission" {
 			continue
 		}

-		layerName, version, err := resourceAwsLambdaLayerVersionParseId(rs.Primary.ID)
+		layerName, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(rs.Primary.ID)
 		if err != nil {
 			return err
 		}

-		_, err = conn.GetLayerVersion(&lambda.GetLayerVersionInput{
+		_, err = conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{
 			LayerName:     aws.String(layerName),
 			VersionNumber: aws.Int64(version),
 		})
+
 		if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") {
 			continue
 		}
 		if err != nil {
 			return err
 		}
+		// this test creates the Lambda layer itself, not only the layer permission, so verify the layer was destroyed as well.
+		err = testAccCheckLambdaLayerVersionDestroy(s)
+		if err != nil {
+			return err
+		}

-		return fmt.Errorf("Lambda Layer Version (%s) still exists", rs.Primary.ID)
+		return fmt.Errorf("Lambda Layer Version Permission (%s) still exists", rs.Primary.ID)
 	}
-
 	return nil
 }
diff --git a/website/docs/r/lambda_layer_version_permission.html.markdown b/website/docs/r/lambda_layer_version_permission.html.markdown
new file mode 100644
index 000000000000..a6f1c6e15162
--- /dev/null
+++ b/website/docs/r/lambda_layer_version_permission.html.markdown
@@ -0,0 +1,58 @@
+---
+subcategory: "Lambda"
+layout: "aws"
+page_title: "AWS: aws_lambda_layer_version_permission"
+description: |-
+  Provides a Lambda Layer Version Permission resource. It allows you to share your own Lambda Layers with another account by account ID, with all accounts in an AWS Organization, or even with all AWS accounts.
+---
+
+# Resource: aws_lambda_layer_version_permission
+
+Provides a Lambda Layer Version Permission resource. It allows you to share your own Lambda Layers with another account by account ID, with all accounts in an AWS Organization, or even with all AWS accounts.
+
+For information about Lambda Layer Permissions and how to use them, see [Using Resource-based Policies for AWS Lambda][1].
+
+## Example Usage
+
+```hcl
+resource "aws_lambda_layer_version_permission" "lambda_layer_permission" {
+  layer_arn = "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1"
+  layer_version = 1
+  principal = "111111111111"
+  action = "lambda:GetLayerVersion"
+  statement_id = "dev-account"
+}
+```
+
+## Argument Reference
+
+* `layer_arn` - (Required) ARN of the Lambda Layer you want to grant access to.
+* `layer_version` - (Required) Version of the Lambda Layer you want to grant access to. Note: permissions only apply to a single version of a layer.
+* `principal` - (Required) AWS account ID that should be able to use your Lambda Layer. `*` can be used here if you want to share your Lambda Layer widely.
+* `organization_id` - (Optional) ID of the AWS Organization whose accounts should be able to use your Lambda Layer. `principal` must be set to `*` when `organization_id` is provided.
+* `action` - (Required) Action that will be allowed. `lambda:GetLayerVersion` is the value suggested by the AWS documentation.
+* `statement_id` - (Required) The name of the Lambda Layer Permission, for example `dev-account`; a human-readable note describing what the permission is for.
+
+
+## Attributes Reference
+
+* `layer_arn` - The Amazon Resource Name (ARN) of the Lambda Layer without the version.
+* `layer_version` - The version of the Lambda Layer.
+* `principal` - The principal that was granted access to your Lambda Layer.
+* `organization_id` - The AWS Organization that was granted access to your Lambda Layer.
+* `action` - The action allowed to the principal.
+* `statement_id` - Human-readable name of the Lambda Layer Permission.
+* `revision_id` - Identifier of the Lambda Layer Permission.
+* `policy` - Full Lambda Layer Permission policy.
+
+[1]: https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer
+
+## Import
+
+Lambda Layer Permissions can be imported using the layer version `arn`, which includes the layer version number.
+ +``` +$ terraform import \ + aws_lambda_layer_version_permission.lambda_layer_permission \ + arn:aws:lambda:_REGION_:_ACCOUNT_ID_:layer:_LAYER_NAME_:_LAYER_VERSION_ +``` From 935db4811428828ad75e5d5b5170bb695bbd9724 Mon Sep 17 00:00:00 2001 From: Anatoliy Volynskiy Date: Sun, 30 Aug 2020 16:01:04 +0300 Subject: [PATCH 007/304] minor fixes, add disappear test --- ...rce_aws_lambda_layer_version_permission.go | 47 ++++++------- ...ws_lambda_layer_version_permission_test.go | 70 +++++++++++++++---- 2 files changed, 78 insertions(+), 39 deletions(-) diff --git a/aws/resource_aws_lambda_layer_version_permission.go b/aws/resource_aws_lambda_layer_version_permission.go index 7f4adea62243..28bd48e09234 100644 --- a/aws/resource_aws_lambda_layer_version_permission.go +++ b/aws/resource_aws_lambda_layer_version_permission.go @@ -16,9 +16,9 @@ import ( func resourceAwsLambdaLayerVersionPermission() *schema.Resource { return &schema.Resource{ - Create: resourceAwsLambdaLayerVersionPermissionAdd, - Read: resourceAwsLambdaLayerVersionPermissionGet, - Delete: resourceAwsLambdaLayerVersionPermissionRemove, + Create: resourceAwsLambdaLayerVersionPermissionCreate, + Read: resourceAwsLambdaLayerVersionPermissionRead, + Delete: resourceAwsLambdaLayerVersionPermissionDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -26,9 +26,10 @@ func resourceAwsLambdaLayerVersionPermission() *schema.Resource { Schema: map[string]*schema.Schema{ "layer_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + ValidateFunc: validateArn, + Required: true, + ForceNew: true, }, "layer_version": { Type: schema.TypeInt, @@ -67,41 +68,33 @@ func resourceAwsLambdaLayerVersionPermission() *schema.Resource { } } -func resourceAwsLambdaLayerVersionPermissionAdd(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLambdaLayerVersionPermissionCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lambdaconn - layerArn := aws.String(d.Get("layer_arn").(string)) - layerVersion := aws.Int64(int64(d.Get("layer_version").(int))) - statementId := aws.String(d.Get("statement_id").(string)) - principal := aws.String(d.Get("principal").(string)) - organizationId, hasOrganizationId := d.GetOk("organization_id") - action := aws.String(d.Get("action").(string)) - params := &lambda.AddLayerVersionPermissionInput{ - LayerName: layerArn, - VersionNumber: layerVersion, - Action: action, - Principal: principal, - StatementId: statementId, + LayerName: aws.String(d.Get("layer_arn").(string)), + VersionNumber: aws.Int64(int64(d.Get("layer_version").(int))), + Action: aws.String(d.Get("action").(string)), + Principal: aws.String(d.Get("principal").(string)), + StatementId: aws.String(d.Get("statement_id").(string)), } - if hasOrganizationId { - params.OrganizationId = aws.String(organizationId.(string)) + if d.Get("organization_id").(string) != "" { + params.OrganizationId = aws.String(d.Get("organization_id").(string)) } log.Printf("[DEBUG] Adding Lambda layer permissions: %s", params) - result, err := conn.AddLayerVersionPermission(params) + _, err := conn.AddLayerVersionPermission(params) if err != nil { return fmt.Errorf("Error adding lambda layer permissions: %s", err) } - d.SetId(*layerArn + ":" + strconv.FormatInt(*layerVersion, 10)) - d.Set("revision_id", result.RevisionId) + d.SetId(fmt.Sprintf("%s:%s", *params.LayerName, strconv.FormatInt(*params.VersionNumber, 10))) - return resourceAwsLambdaLayerVersionPermissionGet(d, meta) + 
return resourceAwsLambdaLayerVersionPermissionRead(d, meta) } -func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLambdaLayerVersionPermissionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lambdaconn layerName, layerArn, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) @@ -172,7 +165,7 @@ func resourceAwsLambdaLayerVersionPermissionGet(d *schema.ResourceData, meta int return nil } -func resourceAwsLambdaLayerVersionPermissionRemove(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLambdaLayerVersionPermissionDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lambdaconn layerName, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) diff --git a/aws/resource_aws_lambda_layer_version_permission_test.go b/aws/resource_aws_lambda_layer_version_permission_test.go index 618f07cf1f14..2c871097725e 100644 --- a/aws/resource_aws_lambda_layer_version_permission_test.go +++ b/aws/resource_aws_lambda_layer_version_permission_test.go @@ -64,7 +64,7 @@ func testSweepLambdaLayerVersionPermission(region string) error { func TestAccAWSLambdaLayerVersionPermission_all(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -72,13 +72,14 @@ func TestAccAWSLambdaLayerVersionPermission_all(t *testing.T) { CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_all(layerName), + Config: testAccAWSLambdaLayerVersionPermission_all(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", layerName), - testAccCheckAwsLambdaLayerVersionPermissionExists("aws_lambda_layer_version_permission.lambda_layer_permission", layerName), + testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), + resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.lambda_layer", "layer_arn"), ), }, @@ -93,7 +94,7 @@ func TestAccAWSLambdaLayerVersionPermission_all(t *testing.T) { func TestAccAWSLambdaLayerVersionPermission_org(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -101,14 +102,15 @@ func TestAccAWSLambdaLayerVersionPermission_org(t *testing.T) { CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_org(layerName), + Config: testAccAWSLambdaLayerVersionPermission_org(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", 
layerName), - testAccCheckAwsLambdaLayerVersionPermissionExists("aws_lambda_layer_version_permission.lambda_layer_permission", layerName), + testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), resource.TestCheckResourceAttr(resourceName, "organization_id", "o-0123456789"), + resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.lambda_layer", "layer_arn"), ), }, @@ -123,7 +125,7 @@ func TestAccAWSLambdaLayerVersionPermission_org(t *testing.T) { func TestAccAWSLambdaLayerVersionPermission_account(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - layerName := fmt.Sprintf("tf_acc_lambda_layer_version_permission_testing_%s", acctest.RandString(8)) + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -131,13 +133,14 @@ func TestAccAWSLambdaLayerVersionPermission_account(t *testing.T) { CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_account(layerName), + Config: testAccAWSLambdaLayerVersionPermission_account(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", layerName), - testAccCheckAwsLambdaLayerVersionPermissionExists("aws_lambda_layer_version_permission.lambda_layer_permission", layerName), + testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "456789820214"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), + resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.lambda_layer", "layer_arn"), ), }, @@ -150,6 +153,27 @@ func TestAccAWSLambdaLayerVersionPermission_account(t *testing.T) { }) } +func TestAccAWSLambdaLayerVersionPermission_disappears(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLambdaLayerVersionPermission_account(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), + testAccCheckAwsLambdaLayerVersionPermissionDisappears(resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + // Creating Lambda layer and Lambda layer permissions func testAccAWSLambdaLayerVersionPermission_all(layerName string) string { @@ -273,3 +297,25 @@ func testAccCheckLambdaLayerVersionPermissionDestroy(s *terraform.State) error { } return nil } + +func testAccCheckAwsLambdaLayerVersionPermissionDisappears(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, _ := 
s.RootModule().Resources[resourceName] + + conn := testAccProvider.Meta().(*AWSClient).lambdaconn + + layerName, _, version, _ := resourceAwsLambdaLayerVersionPermissionParseId(rs.Primary.Attributes["id"]) + + req := &lambda.RemoveLayerVersionPermissionInput{ + LayerName: aws.String(layerName), + VersionNumber: aws.Int64(version), + StatementId: aws.String(rs.Primary.Attributes["statement_id"]), + } + + _, err2 := conn.RemoveLayerVersionPermission(req) + if err2 != nil { + return err2 + } + return nil + } +} From 5b239c97bbf0edd22708189c78aae1d2d94d4332 Mon Sep 17 00:00:00 2001 From: Anatoliy Volynskiy Date: Sun, 30 Aug 2020 21:05:46 +0300 Subject: [PATCH 008/304] switch tf-plugin-sdk to v2 --- aws/resource_aws_lambda_layer_version_permission.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_lambda_layer_version_permission.go b/aws/resource_aws_lambda_layer_version_permission.go index 28bd48e09234..7685e8de2098 100644 --- a/aws/resource_aws_lambda_layer_version_permission.go +++ b/aws/resource_aws_lambda_layer_version_permission.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceAwsLambdaLayerVersionPermission() *schema.Resource { From e1e1a05a4f73b3962122514730c551cac2cc2f26 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 16 Feb 2021 16:59:04 -0500 Subject: [PATCH 009/304] update plugin SDK v2; tflint and website lint --- ...ws_lambda_layer_version_permission_test.go | 74 +++++++------------ ...bda_layer_version_permission.html.markdown | 8 +- 2 files changed, 30 insertions(+), 52 deletions(-) diff --git a/aws/resource_aws_lambda_layer_version_permission_test.go b/aws/resource_aws_lambda_layer_version_permission_test.go index 2c871097725e..8524befcf5db 100644 --- a/aws/resource_aws_lambda_layer_version_permission_test.go +++ b/aws/resource_aws_lambda_layer_version_permission_test.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func init() { @@ -166,7 +166,7 @@ func TestAccAWSLambdaLayerVersionPermission_disappears(t *testing.T) { Config: testAccAWSLambdaLayerVersionPermission_account(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), - testAccCheckAwsLambdaLayerVersionPermissionDisappears(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsLambdaLayerVersionPermission(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -179,16 +179,16 @@ func TestAccAWSLambdaLayerVersionPermission_disappears(t *testing.T) { func testAccAWSLambdaLayerVersionPermission_all(layerName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer" { - filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + filename = "test-fixtures/lambdatest.zip" + layer_name = "%s" } resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { - layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn 
- layer_version = aws_lambda_layer_version.lambda_layer.version - action = "lambda:GetLayerVersion" - statement_id = "xaccount" - principal = "*" + layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn + layer_version = aws_lambda_layer_version.lambda_layer.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "*" } `, layerName) } @@ -196,17 +196,17 @@ resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { func testAccAWSLambdaLayerVersionPermission_org(layerName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer" { - filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + filename = "test-fixtures/lambdatest.zip" + layer_name = "%s" } resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { - layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn - layer_version = aws_lambda_layer_version.lambda_layer.version - action = "lambda:GetLayerVersion" - statement_id = "xaccount" - principal = "*" - organization_id = "o-0123456789" + layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn + layer_version = aws_lambda_layer_version.lambda_layer.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "*" + organization_id = "o-0123456789" } `, layerName) } @@ -214,16 +214,16 @@ resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { func testAccAWSLambdaLayerVersionPermission_account(layerName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer" { - filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + filename = "test-fixtures/lambdatest.zip" + layer_name = "%s" } resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { - layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn - layer_version = aws_lambda_layer_version.lambda_layer.version - action = "lambda:GetLayerVersion" - statement_id = "xaccount" - principal = "456789820214" + layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn + layer_version = aws_lambda_layer_version.lambda_layer.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "456789820214" } `, layerName) } @@ -297,25 +297,3 @@ func testAccCheckLambdaLayerVersionPermissionDestroy(s *terraform.State) error { } return nil } - -func testAccCheckAwsLambdaLayerVersionPermissionDisappears(resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, _ := s.RootModule().Resources[resourceName] - - conn := testAccProvider.Meta().(*AWSClient).lambdaconn - - layerName, _, version, _ := resourceAwsLambdaLayerVersionPermissionParseId(rs.Primary.Attributes["id"]) - - req := &lambda.RemoveLayerVersionPermissionInput{ - LayerName: aws.String(layerName), - VersionNumber: aws.Int64(version), - StatementId: aws.String(rs.Primary.Attributes["statement_id"]), - } - - _, err2 := conn.RemoveLayerVersionPermission(req) - if err2 != nil { - return err2 - } - return nil - } -} diff --git a/website/docs/r/lambda_layer_version_permission.html.markdown b/website/docs/r/lambda_layer_version_permission.html.markdown index a6f1c6e15162..53772d8c6d9a 100644 --- a/website/docs/r/lambda_layer_version_permission.html.markdown +++ b/website/docs/r/lambda_layer_version_permission.html.markdown @@ -16,11 +16,11 @@ For information about Lambda Layer Permissions and how to use them, see [Using R ```hcl resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { - layer_arn = 
"arn:aws:lambda:us-west-2:123456654321:layer:test_layer1" + layer_arn = "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1" layer_version = 1 - principal = "111111111111" - action = "lambda:GetLayerVersion" - statement_id = "dev-account" + principal = "111111111111" + action = "lambda:GetLayerVersion" + statement_id = "dev-account" } ``` From b7356a7d5fca49da9ad751ecb2e70bb1425e8149 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:55:05 -0700 Subject: [PATCH 010/304] init setup for replication configuration resource Blocking out general structure for new independent resource for managing the s3 bucket replication configuration settings Pulling over logic from resource s3 bucket to start with --- aws/resource_aws_s3_bucket.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 54af265517f1..25dff08edf12 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -789,7 +789,7 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("replication_configuration") { - if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil { + if err := resourceAwsS3BucketInternalReplicationConfigurationUpdate(s3conn, d); err != nil { return err } } @@ -1998,7 +1998,7 @@ func resourceAwsS3BucketObjectLockConfigurationUpdate(s3conn *s3.S3, d *schema.R return nil } -func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { +func resourceAwsS3BucketInternalReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { bucket := d.Get("bucket").(string) replicationConfiguration := d.Get("replication_configuration").([]interface{}) From d8acd7a94e7aed1f59d20ac2baf4779a7089ee10 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:57:33 -0700 Subject: [PATCH 011/304] adding new resource for replication configurations --- aws/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/provider.go b/aws/provider.go index 3f9f86dacc41..5641b36adb4e 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -1047,6 +1047,7 @@ func Provider() *schema.Provider { "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), "aws_s3_bucket_metric": resourceAwsS3BucketMetric(), "aws_s3_bucket_inventory": resourceAwsS3BucketInventory(), + "aws_s3_bucket_replication_configuration": resourceAwsS3BucketReplicationConfiguration(), "aws_s3_object_copy": resourceAwsS3ObjectCopy(), "aws_s3control_bucket": resourceAwsS3ControlBucket(), "aws_s3control_bucket_policy": resourceAwsS3ControlBucketPolicy(), From 583894f05786fd586a647e481ea334f7d40515fe Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 11:35:47 -0700 Subject: [PATCH 012/304] cleanup and remove unneeded logic --- ...aws_s3_bucket_replication_configuration.go | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go new file mode 100644 index 000000000000..400a2f12c5b8 --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketReplicationConfigurationCreate, + Read: resourceAwsS3BucketReplicationConfigurationRead, + Update: resourceAwsS3BucketReplicationConfigurationUpdate, + Delete: resourceAwsS3BucketReplicationConfigurationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"bucket_prefix"}, + ValidateFunc: validation.StringLenBetween(0, 63), + }, + "role": { + Type: schema.TypeString, + Required: true, + }, + "rules": { + Type: schema.TypeSet, + Required: true, + Set: rulesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), + }, + "destination": { + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAwsAccountId, + }, + "bucket": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + }, + "replica_kms_key_id": { + Type: schema.TypeString, + Optional: true, + }, + "access_control_translation": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + }, + }, + }, + }, + }, + }, + }, + "source_selection_criteria": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sse_kms_encrypted_objects": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "tags": tagsSchema(), + }, + }, + }, + "delete_marker_replication_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + }, + }, + }, + }, + + "tags": tagsSchema(), + "tags_all": tagsSchemaComputed(), + }, + + CustomizeDiff: SetTagsDiff, + } +} + +func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta 
interface{}) error { + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + input := &s3.HeadBucketInput{ + Bucket: aws.String(d.Get("bucket").(string)), + } + + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + _, err := s3conn.HeadBucket(input) + + if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + return resource.RetryableError(err) + } + + if d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + // Read the bucket replication configuration + replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(d.Get("bucket").(string)), + }) + }) + if err != nil && !isAWSErr(err, "ReplicationConfigurationNotFoundError", "") { + return fmt.Errorf("error getting S3 Bucket replication: %s", err) + } + replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) + if !ok || replication == nil { + return fmt.Errorf("error reading replication_configuration") + } + + return nil +} + +func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + + return nil +} From 457754ae5aefa05111cee5ebdb29cd30e2b2d045 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:14:09 -0700 Subject: [PATCH 013/304] WIP setup update processes --- ...aws_s3_bucket_replication_configuration.go | 152 ++++++++++++++++-- 1 file changed, 142 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 400a2f12c5b8..74af667e458c 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -23,12 +26,11 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"bucket_prefix"}, - ValidateFunc: validation.StringLenBetween(0, 63), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 63), }, "role": { Type: schema.TypeString, @@ -164,10 +166,6 @@ func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, m return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } -func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceAwsS3BucketReplicationConfigurationRead(d, meta) -} - func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn @@ -209,6 +207,140 @@ func resourceAwsS3BucketReplicationConfigurationRead(d 
*schema.ResourceData, met return nil } +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + rc := &s3.ReplicationConfiguration{} + if val, ok := d.GetOk("role"); ok { + rc.Role = aws.String(val.(string)) + } + + rcRules := d.Get("rules").(*schema.Set).List() + rules := []*s3.ReplicationRule{} + for _, v := range rcRules { + rr := v.(map[string]interface{}) + rcRule := &s3.ReplicationRule{} + if status, ok := rr["status"]; ok && status != "" { + rcRule.Status = aws.String(status.(string)) + } else { + continue + } + + if rrid, ok := rr["id"]; ok && rrid != "" { + rcRule.ID = aws.String(rrid.(string)) + } + + ruleDestination := &s3.Destination{} + if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { + if dest[0] != nil { + bd := dest[0].(map[string]interface{}) + ruleDestination.Bucket = aws.String(bd["bucket"].(string)) + + if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { + ruleDestination.StorageClass = aws.String(storageClass.(string)) + } + + if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" { + ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)), + } + } + + if account, ok := bd["account_id"]; ok && account != "" { + ruleDestination.Account = aws.String(account.(string)) + } + + if aclTranslation, ok := bd["access_control_translation"].([]interface{}); ok && len(aclTranslation) > 0 { + aclTranslationValues := aclTranslation[0].(map[string]interface{}) + ruleAclTranslation := &s3.AccessControlTranslation{} + ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) + ruleDestination.AccessControlTranslation = ruleAclTranslation + } + } + } + rcRule.Destination = ruleDestination + + if ssc, ok := rr["source_selection_criteria"].([]interface{}); ok && len(ssc) > 0 { + if ssc[0] != nil { + sscValues := ssc[0].(map[string]interface{}) + ruleSsc := &s3.SourceSelectionCriteria{} + if sseKms, ok := sscValues["sse_kms_encrypted_objects"].([]interface{}); ok && len(sseKms) > 0 { + if sseKms[0] != nil { + sseKmsValues := sseKms[0].(map[string]interface{}) + sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + if sseKmsValues["enabled"].(bool) { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + } else { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + } + ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + } + } + rcRule.SourceSelectionCriteria = ruleSsc + } + } + + if f, ok := rr["filter"].([]interface{}); ok && len(f) > 0 && f[0] != nil { + // XML schema V2. 
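+			// A filter block opts the rule into the V2 replication schema:
+			// priority and filter are sent to the API, and
+			// delete_marker_replication_status defaults to Disabled when unset.
+			// Rules without a filter fall back to the V1 prefix-only schema in
+			// the else branch below.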
+ rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) + rcRule.Filter = &s3.ReplicationRuleFilter{} + filter := f[0].(map[string]interface{}) + tags := keyvaluetags.New(filter["tags"]).IgnoreAws().S3Tags() + if len(tags) > 0 { + rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(filter["prefix"].(string)), + Tags: tags, + } + } else { + rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + } + + if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(dmr), + } + } else { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + } + } + } else { + // XML schema V1. + rcRule.Prefix = aws.String(rr["prefix"].(string)) + } + + rules = append(rules, rcRule) + } + + rc.Rules = rules + i := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, + } + log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := s3conn.PutBucketReplication(i) + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketReplication(i) + } + if err != nil { + return fmt.Errorf("Error putting S3 replication configuration: %s", err) + } + + return nil + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { return nil From 22360e8bd646550edd38d6f7b924c135b0a5fccb Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:15:06 -0700 Subject: [PATCH 014/304] WIP pull in tests from s3 bucket resource --- ...3_bucket_replication_configuration_test.go | 1461 +++++++++++++++++ 1 file changed, 1461 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration_test.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go new file mode 100644 index 000000000000..ed348eb3c912 --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -0,0 +1,1461 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "regexp" + "testing" +) + +func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: 
testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "GLACIER"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassGlacier), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", 
testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "prefix3", + "filter.0.tags.%": "1", + "filter.0.tags.Key3": "Value3", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { + // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + 
partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 +func 
TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + +// StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 +func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so 
that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { + rInt := acctest.RandInt() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), + ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), + }, + }, + }) +} + +// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 +func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + partition := testAccGetPartition() + 
iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + }, + }, + }, + Priority: aws.Int64(42), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String("foo"), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + }, + }, + }, + Priority: aws.Int64(41), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + 
Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + { + Key: aws.String("Foo"), + Value: aws.String("Bar"), + }, + }, + }, + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationResourceName := "aws_s3_bucket.destination" + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExists(destinationResourceName), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + +func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { + return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = "tf-iam-role-replication-%[1]d" + + assume_role_policy = < Date: Thu, 19 Aug 2021 11:04:42 -0700 Subject: [PATCH 015/304] WIP ensure create/read/update logic is operational --- ...aws_s3_bucket_replication_configuration.go | 127 +++++++++++++++++- ...3_bucket_replication_configuration_test.go | 33 ++++- 2 files changed, 153 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 74af667e458c..4f8cc098c2a3 100644 --- 
a/aws/resource_aws_s3_bucket_replication_configuration.go
+++ b/aws/resource_aws_s3_bucket_replication_configuration.go
@@ -2,8 +2,8 @@ package aws

 import (
 	"fmt"
-	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
 	"log"
+	"net/http"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
@@ -12,11 +12,13 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource"
 )

 func resourceAwsS3BucketReplicationConfiguration() *schema.Resource {
 	return &schema.Resource{
-		Create: resourceAwsS3BucketReplicationConfigurationCreate,
+		Create: resourceAwsS3BucketReplicationConfigurationPut,
 		Read:   resourceAwsS3BucketReplicationConfigurationRead,
 		Update: resourceAwsS3BucketReplicationConfigurationUpdate,
 		Delete: resourceAwsS3BucketReplicationConfigurationDelete,
@@ -162,7 +164,16 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource {
 	}
 }

-func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error {
+func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta interface{}) error {
+	// Get the bucket
+	var bucket string
+	if v, ok := d.GetOk("bucket"); ok {
+		bucket = v.(string)
+	} else {
+		return fmt.Errorf("bucket is required: cannot manage a replication configuration without a bucket name")
+	}
+	d.SetId(bucket)
+
 	return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta)
 }

@@ -176,7 +187,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met
 	err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError {
 		_, err := s3conn.HeadBucket(input)

-		if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) {
+		if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) {
 			return resource.RetryableError(err)
 		}

@@ -190,6 +201,29 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met
 		return nil
 	})
+
+	if tfresource.TimedOut(err) {
+		_, err = s3conn.HeadBucket(input)
+	}
+
+	if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) {
+		log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) {
+		log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err)
+	}
+
+	if _, ok := d.GetOk("bucket"); !ok {
+		d.Set("bucket", d.Id())
+	}
+
 	// Read the bucket replication configuration
 	replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
 		return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
@@ -203,6 +237,90 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met
 	if !ok || replication == nil {
 		return fmt.Errorf("error reading replication_configuration")
 	}
+	r := replication.ReplicationConfiguration
+	// set role
+	if r.Role != nil && aws.StringValue(r.Role) != "" {
+		d.Set("role", aws.StringValue(r.Role))
+	}
+
+	// set rules, these need to be flattened
+	rules := make([]interface{}, 0, len(r.Rules))
+	for _, v := range r.Rules {
+		t := make(map[string]interface{})
+		if v.Destination != nil {
+			rd := make(map[string]interface{})
+			if 
v.Destination.Bucket != nil { + rd["bucket"] = aws.StringValue(v.Destination.Bucket) + } + if v.Destination.StorageClass != nil { + rd["storage_class"] = aws.StringValue(v.Destination.StorageClass) + } + if v.Destination.EncryptionConfiguration != nil { + if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil { + rd["replica_kms_key_id"] = aws.StringValue(v.Destination.EncryptionConfiguration.ReplicaKmsKeyID) + } + } + if v.Destination.Account != nil { + rd["account_id"] = aws.StringValue(v.Destination.Account) + } + if v.Destination.AccessControlTranslation != nil { + rdt := map[string]interface{}{ + "owner": aws.StringValue(v.Destination.AccessControlTranslation.Owner), + } + rd["access_control_translation"] = []interface{}{rdt} + } + t["destination"] = []interface{}{rd} + } + + if v.ID != nil { + t["id"] = aws.StringValue(v.ID) + } + if v.Prefix != nil { + t["prefix"] = aws.StringValue(v.Prefix) + } + if v.Status != nil { + t["status"] = aws.StringValue(v.Status) + } + if vssc := v.SourceSelectionCriteria; vssc != nil { + tssc := make(map[string]interface{}) + if vssc.SseKmsEncryptedObjects != nil { + tSseKms := make(map[string]interface{}) + if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { + tSseKms["enabled"] = true + } else if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { + tSseKms["enabled"] = false + } + tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} + } + t["source_selection_criteria"] = []interface{}{tssc} + } + + if v.Priority != nil { + t["priority"] = int(aws.Int64Value(v.Priority)) + } + + if f := v.Filter; f != nil { + m := map[string]interface{}{} + if f.Prefix != nil { + m["prefix"] = aws.StringValue(f.Prefix) + } + if t := f.Tag; t != nil { + m["tags"] = keyvaluetags.S3KeyValueTags([]*s3.Tag{t}).IgnoreAws().Map() + } + if a := f.And; a != nil { + m["prefix"] = aws.StringValue(a.Prefix) + m["tags"] = keyvaluetags.S3KeyValueTags(a.Tags).IgnoreAws().Map() + } + t["filter"] = []interface{}{m} + + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { + t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + } + } + + rules = append(rules, t) + } + d.Set("rules", schema.NewSet(rulesHash, rules)) return nil } @@ -337,7 +455,6 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m return fmt.Errorf("Error putting S3 replication configuration: %s", err) } - return nil return resourceAwsS3BucketReplicationConfigurationRead(d, meta) } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index ed348eb3c912..c12697906f40 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,15 +2,44 @@ package aws import ( "fmt" + "regexp" + "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "regexp" - "testing" ) +func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { + rInt := acctest.RandInt() + iamRoleResourceName := "aws_iam_role.role" + resourceName := 
"aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + ), + }, + }, + }) +} + func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() From 2d0f9cea737d7815c09c9756b188bee1502ba1ff Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 19 Aug 2021 14:05:53 -0700 Subject: [PATCH 016/304] basic tests passing --- ...aws_s3_bucket_replication_configuration.go | 5 - ...3_bucket_replication_configuration_test.go | 92 +++++-------------- 2 files changed, 23 insertions(+), 74 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4f8cc098c2a3..5742d810d4bc 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -155,12 +155,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, - - "tags": tagsSchema(), - "tags_all": tagsSchemaComputed(), }, - - CustomizeDiff: SetTagsDiff, } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c12697906f40..f872ab698f1a 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -12,34 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { - rInt := acctest.RandInt() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - ), - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() @@ -902,6 +874,10 @@ resource "aws_s3_bucket" "destination" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } resource 
"aws_s3_bucket" "source" { @@ -910,6 +886,10 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } `, randInt) } @@ -945,6 +925,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -954,6 +937,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1014,6 +1000,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -1023,6 +1012,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1094,6 +1086,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1316,47 +1311,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { `) } -func testAccAWSS3BucketReplicationConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination string) string { - return composeConfig(testAccAWSS3BucketReplicationConfig_iamPolicy(rName), fmt.Sprintf(` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = %[1]q - acl = "private" - - versioning { - enabled = true - } - - replication_configuration { - role = aws_iam_role.test.arn - - rules { - id = "testid" - status = "Enabled" - - filter { - prefix = "testprefix" - } - - delete_marker_replication_status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } - } -} - -resource "aws_s3_bucket_replication_configuration" "destination" { - bucket = %[2]q - - versioning { - enabled = true - } -} -`, rName, rNameDestination)) -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { From c08a294393ccde4d025dd1721cbf3d4037547a7b Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 15:27:31 -0700 Subject: [PATCH 017/304] Update expected resource names Rename resource names to reflect new position in configuration scope of the independent resource. 
Use literal strings instead of fmt.Sprint in hcl concatination --- ...3_bucket_replication_configuration_test.go | 169 +++++++----------- 1 file changed, 66 insertions(+), 103 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index f872ab698f1a..4ac4e078867e 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -109,7 +108,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -130,9 +129,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -141,7 +139,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -150,7 +148,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -176,7 +174,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -197,9 +195,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", 
testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -208,7 +205,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -218,7 +215,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -247,7 +244,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -267,9 +264,8 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -278,7 +274,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -306,7 +302,7 @@ func 
TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -324,9 +320,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -358,9 +353,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -399,7 +393,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -417,9 +411,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -448,9 +441,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo 
Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -480,7 +472,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -512,35 +504,12 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { - rInt := acctest.RandInt() - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), - ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), - }, - }, - }) -} - // Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -578,7 +547,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -596,9 +565,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, 
"replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -626,9 +594,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -663,9 +630,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -701,9 +667,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -743,9 +708,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -790,7 +754,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" rName := acctest.RandomWithPrefix("tf-acc-test") destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") @@ -805,9 +769,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -943,7 +906,7 @@ resource "aws_s3_bucket" "destination3" { } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn rules { @@ -984,7 +947,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } - } + } `, randInt)) } @@ -1131,7 +1094,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_kms_key" "replica" { provider = "awsalternate" description = "TF Acceptance Test S3 repl KMS key" @@ -1160,11 +1123,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1187,11 +1150,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + 
fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1210,11 +1173,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_kms_key" "replica" { @@ -1250,11 +1213,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1269,11 +1232,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1288,11 +1251,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1308,11 +1271,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1331,11 +1294,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1356,11 +1319,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1383,11 +1346,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func 
testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1413,11 +1376,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1440,5 +1403,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } From 61573bdd180ef682eac03fef31c9fcc49e90193d Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 16:03:59 -0700 Subject: [PATCH 018/304] Guard against missing bucket or import id Ensure that the source bucket name is configured in the HCL Ensure that when importing the bucket name is passed in to the process as the import id value --- ...aws_s3_bucket_replication_configuration.go | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 5742d810d4bc..1eb8c0334882 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "log" "net/http" @@ -165,7 +166,8 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta if v, ok := d.GetOk("bucket"); ok { bucket = v.(string) } else { - // fail, can't do anything without a bucket + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") } d.SetId(bucket) @@ -173,12 +175,24 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta } func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - input := &s3.HeadBucketInput{ - Bucket: aws.String(d.Get("bucket").(string)), + if _, ok := d.GetOk("bucket"); !ok { + // during import operations, use the supplied ID for the bucket name + d.Set("bucket", d.Id()) + } + + var bucket *string + input := &s3.HeadBucketInput{} + if rsp, ok := d.GetOk("bucket"); !ok { + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") + } else { + bucket = aws.String(rsp.(string)) + input.Bucket = bucket } + s3conn := meta.(*AWSClient).s3conn + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) @@ -215,14 +229,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) } - if _, ok := d.GetOk("bucket"); !ok { - d.Set("bucket", d.Id()) - } - // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(d.Get("bucket").(string)), + Bucket: bucket, }) }) if err != nil && !isAWSErr(err, 
"ReplicationConfigurationNotFoundError", "") { From eef6a43311e4d53a4f459a3f34c5e51a39b9bfa8 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 24 Aug 2021 16:11:06 -0700 Subject: [PATCH 019/304] Cleanout and relocate testing logic Relocate replication testing helper functions out of the s3 bucket tests and into the replication configuration testing file. Remove s3 bucket existance checks from replication testing per does not apply to the replication resource logic. --- ...3_bucket_replication_configuration_test.go | 290 ++++++++++-------- aws/resource_aws_s3_bucket_test.go | 64 ---- 2 files changed, 155 insertions(+), 199 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 4ac4e078867e..e9bf0f3c4497 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "reflect" + "sort" + "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -9,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { @@ -106,8 +110,6 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -125,10 +127,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -172,8 +170,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -191,10 +187,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, 
testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -242,8 +234,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -261,9 +251,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -299,7 +286,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -319,7 +305,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -352,7 +337,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -390,7 +374,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr // Reference: 
https://github.com/hashicorp/terraform-provider-aws/issues/12480 func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -410,7 +393,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -440,7 +422,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -470,8 +451,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -488,10 +467,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), + Check: resource.ComposeTestCheckFunc(), }, { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -504,47 +480,8 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 -func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { - rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: 
testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -564,10 +501,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -593,10 +528,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -629,10 +562,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -666,10 +597,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), 
testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -707,10 +636,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -755,8 +682,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() rName := acctest.RandomWithPrefix("tf-acc-test") - destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -766,12 +693,10 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExists(resourceName), testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -795,7 +720,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { ), }, { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, @@ -806,8 +731,71 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + for _, rule := range rules { + if dest := rule.Destination; dest != nil { + if account := dest.Account; account != nil && strings.HasPrefix(aws.StringValue(dest.Account), "${") { + resourceReference := strings.Replace(aws.StringValue(dest.Account), "${", "", 1) + resourceReference = strings.Replace(resourceReference, "}", "", 1) + resourceReferenceParts := strings.Split(resourceReference, ".") + resourceAttribute := resourceReferenceParts[len(resourceReferenceParts)-1] + resourceName := strings.Join(resourceReferenceParts[:len(resourceReferenceParts)-1], ".") + value := s.RootModule().Resources[resourceName].Primary.Attributes[resourceAttribute] + dest.Account = aws.String(value) + } + if ec := dest.EncryptionConfiguration; ec != nil { + if ec.ReplicaKmsKeyID != nil { + key_arn := s.RootModule().Resources["aws_kms_key.replica"].Primary.Attributes["arn"] + ec.ReplicaKmsKeyID = 
aws.String(strings.Replace(*ec.ReplicaKmsKeyID, "${aws_kms_key.replica.arn}", key_arn, -1)) + } + } + } + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + + conn := testAccProvider.Meta().(*AWSClient).s3conn + out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(rs.Primary.ID), + }) + if err != nil { + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return fmt.Errorf("S3 bucket not found") + } + if rules == nil { + return nil + } + return fmt.Errorf("GetReplicationConfiguration error: %v", err) + } + + for _, rule := range out.ReplicationConfiguration.Rules { + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + if !reflect.DeepEqual(out.ReplicationConfiguration.Rules, rules) { + return fmt.Errorf("bad replication rules, expected: %v, got %v", rules, out.ReplicationConfiguration.Rules) + } + + return nil + } +} + func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { - return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` + return fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_iam_role" "role" { @@ -853,8 +841,7 @@ resource "aws_s3_bucket" "source" { lifecycle { ignore_changes = [replication_configuration] } -} -`, randInt) +} `, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { @@ -873,8 +860,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "%[1]s" } } -} -`, storageClass) +} `, storageClass) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { @@ -948,8 +934,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1034,8 +1019,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1089,8 +1073,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1122,8 +1105,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1149,8 +1131,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1172,8 +1153,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1212,8 +1192,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { @@ -1231,8 +1210,7 @@ 
resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.destination.arn } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { @@ -1250,28 +1228,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` -} - -func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { @@ -1293,8 +1250,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { @@ -1318,8 +1274,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { @@ -1345,8 +1300,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { @@ -1375,8 +1329,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { @@ -1402,6 +1355,73 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } +} ` } -` + +func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Mon, 30 Aug 2021 10:19:46 -0700 Subject: [PATCH 020/304] Support Existing Object Replication Adding schema for ExistingObjectReplication configuration Adding read logic to identify ExistingObjectReplication configurations added to replication rules Adding update logic to include ExistingObjectReplicaiton configuration in the PutBucketReplicaiton input --- ...aws_s3_bucket_replication_configuration.go | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1eb8c0334882..09a75e12c6fe 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -148,6 +148,21 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "existing_object_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + }, + }, + }, + }, "delete_marker_replication_status": { Type: schema.TypeString, Optional: true, @@ -248,7 +263,6 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met 
d.Set("role", aws.StringValue(r.Role)) } - // set rules, these need to be flattened rules := make([]interface{}, 0, len(r.Rules)) for _, v := range r.Rules { t := make(map[string]interface{}) @@ -277,6 +291,12 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met t["destination"] = []interface{}{rd} } + if v.ExistingObjectReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) + t["existing_object_replication"] = status + } + if v.ID != nil { t["id"] = aws.StringValue(v.ID) } @@ -354,6 +374,14 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.ID = aws.String(rrid.(string)) } + eor := rr["existing_object_replication"].([]interface{}) + if len(eor) > 0 { + s := eor[0].(map[string]interface{}) + rcRule.ExistingObjectReplication = &s3.ExistingObjectReplication{ + Status: aws.String(s["status"].(string)), + } + } + ruleDestination := &s3.Destination{} if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { if dest[0] != nil { From 796c1cba6315c317108a9874d5dfacd45afc9808 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 30 Aug 2021 10:23:04 -0700 Subject: [PATCH 021/304] Testing for ExistingObjectReplication In order for ExistingObjectReplication to work on s3 buckets, a request to AWS Technical Support needs to be made. Once they allow the configuration the test will operate as expected. --- ...3_bucket_replication_configuration_test.go | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e9bf0f3c4497..31bf253c6982 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -731,6 +731,68 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +const isExistingObjectReplicationBlocked = true + +func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { + if isExistingObjectReplicationBlocked { + /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. + Once that request is approved, this can be unblocked for testing. 
+	*/
+		return
+	}
+	resourceName := "aws_s3_bucket_replication_configuration.replication"
+	rInt := acctest.RandInt()
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	rNameDestination := acctest.RandomWithPrefix("tf-acc-test")
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		ErrorCheck:   testAccErrorCheck(t, s3.EndpointsID),
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSS3BucketDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)),
+					resource.TestCheckResourceAttr(resourceName, "rules.#", "1"),
+					testAccCheckAWSS3BucketReplicationRules(
+						resourceName,
+						[]*s3.ReplicationRule{
+							{
+								ID: aws.String("testid"),
+								Destination: &s3.Destination{
+									Bucket:       aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)),
+									StorageClass: aws.String(s3.ObjectStorageClassStandard),
+								},
+								Status: aws.String(s3.ReplicationRuleStatusEnabled),
+								Filter: &s3.ReplicationRuleFilter{
+									Prefix: aws.String("testprefix"),
+								},
+								Priority: aws.Int64(0),
+								DeleteMarkerReplication: &s3.DeleteMarkerReplication{
+									Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled),
+								},
+								ExistingObjectReplication: &s3.ExistingObjectReplication{
+									Status: aws.String(s3.ExistingObjectReplicationStatusEnabled),
+								},
+							},
+						},
+					),
+				),
+			},
+			{
+				Config:            testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt),
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+				ImportStateVerifyIgnore: []string{
+					"force_destroy", "acl"},
+			},
+		},
+	})
+}
+
+func testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination string, rInt int) string {
+	return fmt.Sprintf(`
+resource "aws_iam_role" "test" {
+  name = %[1]q
+
+  assume_role_policy = <<POLICY
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Action": "sts:AssumeRole",
+      "Principal": {
+        "Service": "s3.amazonaws.com"
+      },
+      "Effect": "Allow",
+      "Sid": ""
+    }
+  ]
+}
+POLICY
+}
+
+resource "aws_s3_bucket" "source" {
+  bucket = %[1]q
+
+  versioning {
+    enabled = true
+  }
+
+  lifecycle {
+    ignore_changes = [replication_configuration]
+  }
+}
+
+resource "aws_s3_bucket" "destination" {
+  bucket = %[2]q
+
+  versioning {
+    enabled = true
+  }
+}
+
+resource "aws_s3_bucket_replication_configuration" "replication" {
+  bucket = aws_s3_bucket.source.id
+  role   = aws_iam_role.test.arn
+
+  rules {
+    id     = "testid"
+    status = "Enabled"
+
+    filter {
+      prefix = "testprefix"
+    }
+
+    delete_marker_replication_status = "Enabled"
+
+    existing_object_replication {
+      status = "Enabled"
+    }
+
+    destination {
+      bucket        = aws_s3_bucket.destination.arn
+      storage_class = "STANDARD"
+    }
+  }
+}
+`, rName, rNameDestination, rInt)
+}

From: Dave Kujawski
Date: Wed, 1 Sep 2021 08:39:13 -0700
Subject: [PATCH 022/304] Adding support for Replication Time Control

new schema definition for "replication_time" along with update and read
logic.
tracking upstream changes, adopt "waiter" module --- ...aws_s3_bucket_replication_configuration.go | 62 +++++++++++++++- ...3_bucket_replication_configuration_test.go | 72 +++++++++++++++++++ 2 files changed, 131 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 09a75e12c6fe..4fd9a8ce7c73 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -91,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "replication_time": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + }, + "time": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -157,7 +188,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), }, }, @@ -208,7 +239,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met s3conn := meta.(*AWSClient).s3conn - err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + err := resource.Retry(waiter.BucketCreatedTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { @@ -288,10 +319,20 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } rd["access_control_translation"] = []interface{}{rdt} } + if v.Destination.ReplicationTime != nil { + if v.Destination.ReplicationTime.Status != nil { + rd["replication_time"] = map[string]interface{}{ + "status": v.Destination.ReplicationTime.Status, + "time": map[string]interface{}{ + "minutes": v.Destination.ReplicationTime.Time.Minutes, + }, + } + } + } t["destination"] = []interface{}{rd} } - if v.ExistingObjectReplication.Status != nil { + if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) t["existing_object_replication"] = status @@ -408,6 +449,21 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) ruleDestination.AccessControlTranslation = ruleAclTranslation } + + rt, ok := bd["replication_time"].([]interface{}) + if ok && len(rt) > 0 { + s := rt[0].(map[string]interface{}) + if t, ok := s["time"].([]interface{}); ok && len(t) > 0 { + m := 
t[0].(map[string]interface{}) + ruleDestination.ReplicationTime = &s3.ReplicationTime{ + Status: aws.String(s["status"].(string)), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 31bf253c6982..7d44e94c1664 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -448,6 +448,54 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo }) } +func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigRTC(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + ReplicationTime: &s3.ReplicationTime{ + Status: aws.String(s3.ReplicationTimeStatusEnabled), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -925,6 +973,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } `, storageClass) } +func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } + } + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), From cd5556b8ea9e0e5cd3b7ea0ad7bfca6b389aef86 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 14:29:24 -0700 Subject: [PATCH 023/304] Adding Metrics support Metrics are a requirement for the Replication Time Control functionality. Adding it here. 
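
A sketch of the intended destination shape (values are illustrative; as
noted above, metrics must accompany replication_time):

    destination {
      bucket = aws_s3_bucket.destination.arn

      metrics {
        status = "Enabled"
        event_threshold {
          minutes = 15
        }
      }
    }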
Restructure the configuration read logic for Replication Time to be more
correct and in line with the expected data structures.

Update tests to reflect changes
---
 ...aws_s3_bucket_replication_configuration.go | 66 +++++++++++++++++--
 ...3_bucket_replication_configuration_test.go | 27 ++++++--
 2 files changed, 83 insertions(+), 10 deletions(-)

diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go
index 4fd9a8ce7c73..1cf97c4b1d82 100644
--- a/aws/resource_aws_s3_bucket_replication_configuration.go
+++ b/aws/resource_aws_s3_bucket_replication_configuration.go
@@ -92,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource {
 						},
 					},
 				},
+				"metrics": {
+					Type:     schema.TypeList,
+					Optional: true,
+					MinItems: 1,
+					MaxItems: 1,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"status": {
+								Type:         schema.TypeString,
+								Required:     true,
+								ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false),
+							},
+							"event_threshold": {
+								Type:     schema.TypeList,
+								Required: true,
+								MinItems: 1,
+								MaxItems: 1,
+								Elem: &schema.Resource{
+									Schema: map[string]*schema.Schema{
+										"minutes": {
+											Type:         schema.TypeInt,
+											Required:     true,
+											ValidateFunc: validation.IntAtLeast(0),
+										},
+									},
+								},
+							},
+						},
+					},
+				},
 				"replication_time": {
 					Type:     schema.TypeList,
 					Optional: true,
@@ -320,13 +350,23 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met
 					rd["access_control_translation"] = []interface{}{rdt}
 				}
 				if v.Destination.ReplicationTime != nil {
+					drt := make(map[string]interface{})
 					if v.Destination.ReplicationTime.Status != nil {
-						rd["replication_time"] = map[string]interface{}{
-							"status": v.Destination.ReplicationTime.Status,
-							"time": map[string]interface{}{
-								"minutes": v.Destination.ReplicationTime.Time.Minutes,
-							},
-						}
+						drt["status"] = aws.StringValue(v.Destination.ReplicationTime.Status)
+						drtm := make(map[string]interface{})
+						drtm["minutes"] = aws.Int64Value(v.Destination.ReplicationTime.Time.Minutes)
+						drt["time"] = []interface{}{drtm}
+						rd["replication_time"] = []interface{}{drt}
+					}
+				}
+				if v.Destination.Metrics != nil {
+					dm := make(map[string]interface{})
+					if v.Destination.Metrics.Status != nil {
+						dm["status"] = aws.StringValue(v.Destination.Metrics.Status)
+						dmetm := make(map[string]interface{})
+						dmetm["minutes"] = aws.Int64Value(v.Destination.Metrics.EventThreshold.Minutes)
+						dm["event_threshold"] = []interface{}{dmetm}
+						rd["metrics"] = []interface{}{dm}
 					}
 				}
 				t["destination"] = []interface{}{rd}
@@ -464,6 +504,20 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m
 					}
 				}
 
+				rm, ok := bd["metrics"].([]interface{})
+				if ok && len(rm) > 0 {
+					s := rm[0].(map[string]interface{})
+					if et, ok := s["event_threshold"].([]interface{}); ok && len(et) > 0 {
+						m := et[0].(map[string]interface{})
+						ruleDestination.Metrics = &s3.Metrics{
+							Status: aws.String(s["status"].(string)),
+							EventThreshold: &s3.ReplicationTimeValue{
+								Minutes: aws.Int64(int64(m["minutes"].(int))),
+							},
+						}
+					}
+				}
+
 			}
 		}
 		rcRule.Destination = ruleDestination
diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go
index 7d44e94c1664..70ff1e4cea98 100644
--- a/aws/resource_aws_s3_bucket_replication_configuration_test.go
+++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go
@@ -475,7 +475,8 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) {
resourceName,
 					[]*s3.ReplicationRule{
 						{
-							ID:       aws.String("foobar"),
+							ID:       aws.String("foobar"),
+							Priority: aws.Int64(0),
 							Destination: &s3.Destination{
 								Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)),
 								ReplicationTime: &s3.ReplicationTime{
@@ -484,8 +485,19 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) {
 									Minutes: aws.Int64(15),
 								},
 							},
+							Metrics: &s3.Metrics{
+								Status: aws.String(s3.MetricsStatusEnabled),
+								EventThreshold: &s3.ReplicationTimeValue{
+									Minutes: aws.Int64(15),
+								},
+							},
+						},
+						DeleteMarkerReplication: &s3.DeleteMarkerReplication{
+							Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
+						},
+						Filter: &s3.ReplicationRuleFilter{
+							Prefix: aws.String("foo"),
 						},
-						Prefix: aws.String("foo"),
 						Status: aws.String(s3.ReplicationRuleStatusEnabled),
 					},
 				},
@@ -981,9 +993,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" {
 
   rules {
     id     = "foobar"
-    prefix = "foo"
+    filter {
+      prefix = "foo"
+    }
     status = "Enabled"
-
     destination {
       bucket = aws_s3_bucket.destination.arn
       replication_time {
@@ -992,6 +1005,12 @@ resource "aws_s3_bucket_replication_configuration" "replication" {
           minutes = 15
         }
       }
+      metrics {
+        status = "Enabled"
+        event_threshold {
+          minutes = 15
+        }
+      }
     }
   }
 }`
}
From b398f35f9fd9c852bc49e1ae64e35a679ceb9133 Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Wed, 1 Sep 2021 16:14:24 -0700
Subject: [PATCH 024/304] Adding Replica Modifications support, with tests

Update the source_selection_criteria configuration to include the
replica_modifications. Refactored sse_kms_encrypted_objects schema to
map closer to the actual AWS SDK structure.
---
 ...aws_s3_bucket_replication_configuration.go | 44 ++++++---
 ...3_bucket_replication_configuration_test.go | 99 +++++++++++++++----
 2 files changed, 109 insertions(+), 34 deletions(-)

diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go
index 1cf97c4b1d82..8cfffba4044e 100644
--- a/aws/resource_aws_s3_bucket_replication_configuration.go
+++ b/aws/resource_aws_s3_bucket_replication_configuration.go
@@ -159,7 +159,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource {
 					Type:     schema.TypeList,
 					Optional: true,
 					MinItems: 1,
-					MaxItems: 1,
+					MaxItems: 2,
 					Elem: &schema.Resource{
 						Schema: map[string]*schema.Schema{
 							"sse_kms_encrypted_objects": {
@@ -169,9 +169,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource {
 								MaxItems: 1,
 								Elem: &schema.Resource{
 									Schema: map[string]*schema.Schema{
-										"enabled": {
-											Type:     schema.TypeBool,
-											Required: true,
+										"status": {
+											Type:         schema.TypeString,
+											Required:     true,
+											ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false),
+										},
+									},
+								},
+							},
+							"replica_modifications": {
+								Type:     schema.TypeList,
+								Optional: true,
+								MinItems: 1,
+								MaxItems: 1,
+								Elem: &schema.Resource{
+									Schema: map[string]*schema.Schema{
+										"status": {
+											Type:         schema.TypeString,
+											Required:     true,
+											ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false),
 										},
 									},
 								},
@@ -391,11 +407,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met
 					tssc := make(map[string]interface{})
 					if vssc.SseKmsEncryptedObjects != nil {
 						tSseKms := make(map[string]interface{})
-						if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled {
-							tSseKms["enabled"] = true
-						} else if
aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { - tSseKms["enabled"] = false - } + tSseKms["status"] = aws.StringValue(vssc.SseKmsEncryptedObjects.Status) tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} } t["source_selection_criteria"] = []interface{}{tssc} @@ -530,14 +542,18 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m if sseKms[0] != nil { sseKmsValues := sseKms[0].(map[string]interface{}) sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} - if sseKmsValues["enabled"].(bool) { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) - } else { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) - } + sseKmsEncryptedObjects.Status = aws.String(sseKmsValues["status"].(string)) ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects } } + if sscRm, ok := sscValues["replica_modifications"].([]interface{}); ok && len(sscRm) > 0 { + if sscRm[0] != nil { + replicaModValues := sscRm[0].(map[string]interface{}) + replicaModifications := &s3.ReplicaModifications{} + replicaModifications.Status = aws.String(replicaModValues["status"].(string)) + ruleSsc.ReplicaModifications = replicaModifications + } + } rcRule.SourceSelectionCriteria = ruleSsc } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 70ff1e4cea98..acb0374ab3f6 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -508,6 +508,59 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }) } +func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Priority: aws.Int64(0), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + ReplicaModifications: &s3.ReplicaModifications{ + Status: aws.String(s3.ReplicaModificationsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: 
https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -1016,6 +1069,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } +func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" + } + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), @@ -1254,7 +1331,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1341,7 +1418,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1366,24 +1443,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } ` } -func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} ` -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { From 48bdc44c3c21942e8dc740ed40fa86326de45f37 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 14:42:41 -0700 Subject: [PATCH 025/304] terrafmt --- ...3_bucket_replication_configuration_test.go | 570 +++++++++--------- 1 file changed, 285 insertions(+), 285 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index acb0374ab3f6..522c33899795 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1002,94 +1002,94 @@ resource "aws_s3_bucket" "destination" { } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket" "source" { - bucket = "tf-test-bucket-source-%[1]d" + bucket = "tf-test-bucket-source-%[1]d" versioning { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } -} `, randInt) +}`, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + 
prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "%[1]s" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "%[1]s" } -} `, storageClass) + } +}`, storageClass) } func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" + rules { + id = "foobar" + filter { + prefix = "foo" + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - replication_time { - status = "Enabled" - time { - minutes = 15 - } - } - metrics { - status = "Enabled" - event_threshold { - minutes = 15 - } - } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } } } + } }` } func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" - } - source_selection_criteria { - replica_modifications { - status = "Enabled" - } - } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" } } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } }` } @@ -1105,7 +1105,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1117,54 +1117,54 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } - -} `, randInt)) + } + +}`, 
randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1179,7 +1179,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1191,65 +1191,65 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter { - prefix = "prefix3" + filter { + prefix = "prefix3" - tags = { - Key3 = "Value3" - } + tags = { + Key3 = "Value3" } + } - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1264,46 +1264,46 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1316,26 +1316,26 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" 
+ rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1344,24 +1344,24 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1370,20 +1370,20 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1398,176 +1398,176 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role 
= aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - } + destination { + bucket = aws_s3_bucket.destination.arn } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - delete_marker_replication_status = "Enabled" + delete_marker_replication_status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 42 + priority = 42 - filter { - tags = { - ReplicateMe = "Yes" - } + filter { + tags = { + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 41 + priority = 41 - filter { - prefix = "foo" + filter { + prefix = "foo" - tags = { - AnotherTag = "OK" - ReplicateMe = "Yes" - } + tags = { + AnotherTag = "OK" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = 
aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - tags = { - AnotherTag = "OK" - Foo = "Bar" - ReplicateMe = "Yes" - } + filter { + tags = { + AnotherTag = "OK" + Foo = "Bar" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { From 22ee15a150fcb656a798232063d13e268e414f4d Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 15:46:23 -0700 Subject: [PATCH 026/304] terrafmt --- ...3_bucket_replication_configuration_test.go | 89 ++++++++++--------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 522c33899795..012e5f5bb750 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1573,7 +1573,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { return fmt.Sprintf(` resource "aws_iam_role" "test" { - name = %[1]q + name = "%[1]s" assume_role_policy = < Date: Fri, 3 Sep 2021 15:50:53 -0700 Subject: [PATCH 027/304] Initial documentation for new resource Adding documentation page for the new independent resource. Initialized with content copied over from the s3_bucket.html.markdown page. --- ...et_replication_configuration.html.markdown | 190 ++++++++++++++++++ 1 file changed, 190 insertions(+) create mode 100644 website/docs/r/s3_bucket_replication_configuration.html.markdown diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown new file mode 100644 index 000000000000..9333835f4d57 --- /dev/null +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -0,0 +1,190 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_bucket_replication_configuration" +description: |- + Provides a S3 bucket replication configuration resource. +--- + +# Resource: aws_s3_bucket_replication_configuration + +Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. + +## Example Usage + +### Using replication configuration + +```terraform +provider "aws" { + region = "eu-west-1" +} + +provider "aws" { + alias = "central" + region = "eu-central-1" +} + +resource "aws_iam_role" "replication" { + name = "tf-iam-role-replication-12345" + + assume_role_policy = < **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. +Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. 
To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `destination` - (Required) Specifies the destination for the rule (documented below). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. +* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. + +The `destination` object supports the following: + +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + `sse_kms_encrypted_objects` source selection criteria. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. + +The `source_selection_criteria` object supports the following: + +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. + +The `sse_kms_encrypted_objects` object supports the following: + +* `enabled` - (Required) Boolean which indicates if this criteria is enabled. + +The `filter` object supports the following: + +* `prefix` - (Optional) Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. +The rule applies only to objects having all the tags in its tagset. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +## Import + +S3 bucket replication configuration can be imported using the `bucket`, e.g. 
+ +``` +$ terraform import aws_s3_bucket_replication_configuration.replication bucket-name +``` From 644dc23a8ca3080a7807c6419f3d4e87636db346 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 9 Sep 2021 14:26:09 -0700 Subject: [PATCH 028/304] adding new feature documentation --- website/docs/r/s3_bucket.html.markdown | 14 +++++++ ...et_replication_configuration.html.markdown | 40 ++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 88ce04b1eea0..05f1a50e7ee4 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -289,6 +289,10 @@ resource "aws_s3_bucket" "source" { } ``` +~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. + + + ### Enable Default Server Side Encryption ```terraform @@ -424,6 +428,16 @@ The `noncurrent_version_transition` object supports the following The `replication_configuration` object supports the following: +~> **NOTE:** See the `aws_s3_bucket_replication_configuration` resource documentation to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9333835f4d57..9870635e0692 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -107,6 +107,11 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + lifecycle { + ignore_changes = [ + replication_configuration + ] + } } aws_s3_bucket_replication_configuration replication { @@ -126,6 +131,17 @@ aws_s3_bucket_replication_configuration replication { ``` +~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + + ## Argument Reference The following arguments are supported: @@ -142,6 +158,7 @@ The `rules` object supports the following: With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). * `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). 
* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). @@ -153,6 +170,10 @@ Replication configuration V1 supports filtering based on only the `prefix` attri ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +The `existing_object_replication` object supports the following: + +* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -161,15 +182,32 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). +* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). + +The `replication_time` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + +The `metrics` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. The `source_selection_criteria` object supports the following: +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between + replicas and source objects (documented below). + * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` in `destination` must be specified as well. +The `replica_modifications` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `sse_kms_encrypted_objects` object supports the following: -* `enabled` - (Required) Boolean which indicates if this criteria is enabled. +* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
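+
+A minimal sketch of the new `status` form (resource references are illustrative):
+
+```
+source_selection_criteria {
+  sse_kms_encrypted_objects {
+    status = "Enabled"
+  }
+}
+```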
The `filter` object supports the following: From bd4302cb0790c9aeafdfe5829ebbfa3646869ef8 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:09 -0700 Subject: [PATCH 029/304] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 4 ++-- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 012e5f5bb750..e550fcad46da 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,11 +844,11 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = true +const isExistingObjectReplicationBlocked = false func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { - /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + /* https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. Once that request is approved, this can be unblocked for testing. */ return diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9870635e0692..2511d95454bb 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -172,6 +172,8 @@ Replication configuration V1 supports filtering based on only the `prefix` attri The `existing_object_replication` object supports the following: +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) + * `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
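+
+A minimal sketch of a rule enabling it (the other rule arguments are omitted here):
+
+```
+existing_object_replication {
+  status = "Enabled"
+}
+```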
The `destination` object supports the following:
From 17a88f464b9f5326de0a6cb8cf6a3bb616fdeceb Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Fri, 10 Sep 2021 13:37:56 -0700
Subject: [PATCH 030/304] Documentation updates for existing object replication

---
 aws/resource_aws_s3_bucket_replication_configuration_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go
index e550fcad46da..fed215ad3c14 100644
--- a/aws/resource_aws_s3_bucket_replication_configuration_test.go
+++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go
@@ -844,7 +844,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) {
 	})
 }
 
-const isExistingObjectReplicationBlocked = false
+const isExistingObjectReplicationBlocked = true
 
 func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) {
 	if isExistingObjectReplicationBlocked {
From 8bf7bc6a2785e0151c94354904a8ed2bd7f48209 Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Mon, 13 Sep 2021 15:01:45 -0700
Subject: [PATCH 031/304] adding headers and source examples to documentation

---
 ...et_replication_configuration.html.markdown | 162 +++++++++++++++---
 1 file changed, 139 insertions(+), 23 deletions(-)

diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown
index 2511d95454bb..091286c63abb 100644
--- a/website/docs/r/s3_bucket_replication_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown
@@ -131,6 +131,76 @@ aws_s3_bucket_replication_configuration replication {
 
 ```
 
+### Bi-Directional Replication
+
+```
+
+...
+
+resource "aws_s3_bucket" "east" {
+  bucket = "tf-test-bucket-east-12345"
+
+  versioning {
+    enabled = true
+  }
+
+  lifecycle {
+    ignore_changes = [
+      replication_configuration
+    ]
+  }
+}
+
+resource "aws_s3_bucket" "west" {
+  provider = aws.west
+  bucket   = "tf-test-bucket-west-12345"
+
+  versioning {
+    enabled = true
+  }
+
+  lifecycle {
+    ignore_changes = [
+      replication_configuration
+    ]
+  }
+}
+
+resource "aws_s3_bucket_replication_configuration" "east_to_west" {
+  role   = aws_iam_role.east_replication.arn
+  bucket = aws_s3_bucket.east.id
+  rules {
+    id     = "foobar"
+    prefix = "foo"
+    status = "Enabled"
+
+    destination {
+      bucket        = aws_s3_bucket.west.arn
+      storage_class = "STANDARD"
+    }
+  }
+}
+
+resource "aws_s3_bucket_replication_configuration" "west_to_east" {
+  provider = aws.west
+  role     = aws_iam_role.west_replication.arn
+  bucket   = aws_s3_bucket.west.id
+  rules {
+    id     = "foobar"
+    prefix = "foo"
+    status = "Enabled"
+
+    destination {
+      bucket        = aws_s3_bucket.east.arn
+      storage_class = "STANDARD"
+    }
+  }
+}
+```
+
+## Usage Notes
+
+This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results, a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Failure to add the lifecycle configuration to the `aws_s3_bucket` will result in conflicting state.
+
 ~> **NOTE:** To avoid conflicts, always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket.
```
lifecycle {
  ignore_changes = [
    replication_configuration
  ]
}
```
+The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource:
+
+* `replica_modifications` - Added to the `source_selection_criteria` configuration
+* `metrics` - Added to the `destination` configuration
+* `replication_time` - Added to the `destination` configuration
+* `existing_object_replication` - Added to the replication rule
+
+Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication)
 
-## Argument Reference
 
-The following arguments are supported:
+## Argument Reference
 
 The `replication_configuration` object supports the following:
 
@@ -152,30 +228,42 @@ The `replication_configuration` object supports the following:
 * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects.
 * `rules` - (Required) Specifies the rules managing the replication (documented below).
 
-The `rules` object supports the following:
+### Rules
+
+~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified.
 
 ~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules.
 With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to.
 Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration.
+
+The `rules` object supports the following:
 
 * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below).
-* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used).
+* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used).
 * `destination` - (Required) Specifies the destination for the rule (documented below).
 * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below).
 * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length.
 * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length.
 * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules.
 * `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below).
+* `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +### Rule Existing Object Replication + +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) +``` +existing_object_replication { + status = "Enabled" +} +``` +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -187,29 +275,61 @@ The `destination` object supports the following: * `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). * `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +### Replication Time Control + +``` +replication_time { + status = "Enabled" + time { + minutes = 15 + } +} +``` + The `replication_time` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. + +### Metrics + +``` +metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } +} +``` The `metrics` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. + +### Source Selection Criteria The `source_selection_criteria` object supports the following: +``` +source_selection_criteria { + replica_modification { + status = "Enabled" + } + sse_kms_encrypted_objects { + status = "Enabled" + } +} +``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects (documented below). - -* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). 
If specified, `replica_kms_key_id` - in `destination` must be specified as well. - -The `replica_modifications` object supports the following: + replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -The `sse_kms_encrypted_objects` object supports the following: + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Replication Rule Filter The `filter` object supports the following: @@ -217,10 +337,6 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. From 6bae9ee8d5f855d09ed4a5295da91205801cd786 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 14 Sep 2021 10:30:09 -0700 Subject: [PATCH 032/304] adding internal documentation links, cleanup --- ...et_replication_configuration.html.markdown | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 091286c63abb..099b3261ab78 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -8,7 +8,7 @@ description: |- # Resource: aws_s3_bucket_replication_configuration -Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. +Provides an independent configuration resource for S3 bucket [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). ## Example Usage @@ -199,9 +199,9 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
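For illustration, a minimal sketch of a source bucket carrying the lifecycle block described above; the bucket name is a placeholder, and versioning is shown because S3 requires it on both buckets for replication:

```terraform
resource "aws_s3_bucket" "source" {
  bucket = "tf-test-bucket-source-12345"

  # Replication requires versioning on both the source and destination buckets.
  versioning {
    enabled = true
  }

  # Without ignore_changes, the in-line replication_configuration written by
  # the standalone resource would surface as drift on every plan.
  lifecycle {
    ignore_changes = [
      replication_configuration
    ]
  }
}
```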
-~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. +~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. ``` lifecycle { @@ -210,25 +210,25 @@ lifecycle { ] } ``` -The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: -* `replica_modifications` - Added to the `source_selection_criteria` configuration -* `metrics` - Added to the `destination` configuration -* `replication_time` - Added to the `destination` configuration -* `existing_object_replication` - Added to the replication rule +* `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) +* `metrics` - Added to the `destination` configuration object [documented below](#metrics) +* `replication_time` - Added to the `destination` configuration object [documented below](#replication_time) +* `existing_object_replication` - Added to the replication rule object [documented below](#existing_object_replication) Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) ## Argument Reference -The `replication_configuration` object supports the following: +The `replication_configuration` resource supports the following: -* `bucket` - (Required) The ARN of the source S3 bucket where you want Amazon S3 to monitor. +* `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rules` - (Required) Specifies the rules managing the replication (documented below). +* `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### Rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -236,20 +236,19 @@ The `replication_configuration` object supports the following: The `rules` object supports the following: -With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. -Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. -* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). 
* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). -* `destination` - (Required) Specifies the destination for the rule (documented below). -* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `destination` - (Required) Specifies the destination for the rule [documented below](#destination). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. -* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### Rule Existing Object Replication +### exiting_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) @@ -263,7 +262,7 @@ existing_object_replication { * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -### Destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -272,10 +271,10 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. -* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). -* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +* `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). +* `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). -### Replication Time Control +### replication_time ``` replication_time { @@ -291,7 +290,7 @@ The `replication_time` object supports the following: * `status` - (Required) The status of the Replication Time Control. 
Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. -### Metrics +### metrics ``` metrics { @@ -307,7 +306,7 @@ The `metrics` object supports the following: * `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. -### Source Selection Criteria +### source_selection_criteria The `source_selection_criteria` object supports the following: ``` @@ -322,14 +321,14 @@ source_selection_criteria { ``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. 
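As a hedged sketch of the pairing described above (resource names are placeholders), `sse_kms_encrypted_objects` under `source_selection_criteria` must be matched by a `replica_kms_key_id` on the destination:

```terraform
rules {
  id     = "sse-kms"
  status = "Enabled"

  source_selection_criteria {
    sse_kms_encrypted_objects {
      status = "Enabled"
    }
  }

  destination {
    bucket = aws_s3_bucket.destination.arn

    # Required whenever sse_kms_encrypted_objects is enabled above.
    replica_kms_key_id = aws_kms_key.replica.arn
  }
}
```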
-### Replication Rule Filter +### filter The `filter` object supports the following: From 8d0562db0ea5c1f4ceda4d4b53ef664cbcd5a3e0 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 15 Sep 2021 16:34:21 -0700 Subject: [PATCH 033/304] Align delete_marker_replication with other objects --- aws/resource_aws_s3_bucket.go | 2 +- ...aws_s3_bucket_replication_configuration.go | 50 +++++++++++-------- ...3_bucket_replication_configuration_test.go | 12 +++-- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 097ccbfb045f..df3e5b460006 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -2585,7 +2585,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 8cfffba4044e..581ac90653f7 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -96,13 +96,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), }, "event_threshold": { Type: schema.TypeList, @@ -126,13 +126,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), }, "time": { Type: schema.TypeList, @@ -172,7 +172,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), }, }, }, @@ -187,7 +187,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), }, }, }, @@ -235,15 +235,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), }, }, }, }, - "delete_marker_replication_status": { - Type: schema.TypeString, 
- Optional: true, - ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + "delete_marker_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + }, + }, + }, }, }, }, @@ -391,7 +401,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) - t["existing_object_replication"] = status + t["existing_object_replication"] = []interface{}{status} } if v.ID != nil { @@ -431,8 +441,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } t["filter"] = []interface{}{m} - if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { - t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + t["delete_marker_replication"] = []interface{}{status} } } @@ -573,13 +585,11 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) } - if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(dmr), - } - } else { + dmr, ok := rr["delete_marker_replication"].([]interface{}) + if ok && len(dmr) > 0 { + s := dmr[0].(map[string]interface{}) rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s["status"].(string)), } } } else { diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index fed215ad3c14..c5c6879cf80b 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1479,7 +1479,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "foo" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1628,7 +1630,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "testprefix" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1701,7 +1705,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn From eb4cc6bd008ec11faa466c7aa29265b4e8595a0e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 08:31:08 -0700 Subject: [PATCH 034/304] Update delete_marker replication docs to reflect changes --- ...ket_replication_configuration.html.markdown | 18 +++++++++++++++++- 1 
file changed, 17 insertions(+), 1 deletion(-)

diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown
index 099b3261ab78..44d46f1c4d09 100644
--- a/website/docs/r/s3_bucket_replication_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown
@@ -239,7 +239,7 @@ The `rules` object supports the following:
 With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration.
 
 * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication).
-* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used).
+* `delete_marker_replication` - (Optional) Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when `filter` is used)[documented below](#delete_marker_replication).
 * `destination` - (Required) Specifies the destination for the rule [documented below](#destination).
 * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter).
 * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length.
@@ -262,6 +262,22 @@ existing_object_replication {
 
 * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`.
 
+### delete_marker_replication
+
+~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`.
+
+~> **NOTE:** This argument is only available with V2 replication configurations.
+
+The `delete_marker_replication` object supports the following:
+
+```
+delete_marker_replication {
+  status = "Enabled"
+}
+```
+* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`.
+
+
 ### destination
 
 The `destination` object supports the following:

From 27e63481ef9d9a6e1b3f7b8f84f6ca7749b86ba1 Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Thu, 16 Sep 2021 13:59:37 -0700
Subject: [PATCH 035/304] Documentation adjustments

fix typos
shift notes to be above examples
remove unnecessary words
expand on some attribute concepts that may be obscure
---
 website/docs/r/s3_bucket.html.markdown        |  6 +--
 ...et_replication_configuration.html.markdown | 51 ++++++++++---------
 2 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown
index 05f1a50e7ee4..6351cbeeda80 100644
--- a/website/docs/r/s3_bucket.html.markdown
+++ b/website/docs/r/s3_bucket.html.markdown
@@ -178,6 +178,8 @@ resource "aws_s3_bucket" "versioning_bucket" {
 
 ### Using replication configuration
 
+~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features.
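The schema change in PATCH 033 above turns delete marker handling into a nested block; a sketch of a V2 rule using it, modeled on the acceptance-test configuration earlier in this series (names are placeholders):

```terraform
rules {
  id     = "foobar"
  status = "Enabled"

  # delete_marker_replication is only valid in V2 configurations,
  # i.e. together with filter.
  filter {
    prefix = "foo"
  }

  delete_marker_replication {
    status = "Enabled"
  }

  destination {
    bucket = aws_s3_bucket.destination.arn
  }
}
```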
+ ```terraform provider "aws" { region = "eu-west-1" @@ -289,10 +291,6 @@ resource "aws_s3_bucket" "source" { } ``` -~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. - - - ### Enable Default Server Side Encryption ```terraform diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 44d46f1c4d09..de512bcb34d4 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,9 +133,9 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -``` +```terraform -... +#... resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -199,17 +199,18 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. - ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -``` +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. + +```terraform lifecycle { ignore_changes = [ replication_configuration ] } ``` + The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: * `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) @@ -248,51 +249,53 @@ With the `filter` attribute, you can specify object filters based on the object * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### exiting_object_replication +### existing_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -``` +```terraform existing_object_replication { status = "Enabled" } ``` -* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. ### delete_marker_replication -~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`. 
+~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. ~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: -``` +```terraform delete_marker_replication { status = "Enabled" } ``` -* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. ### destination The `destination` object supports the following: -* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. -* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. * `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with `sse_kms_encrypted_objects` source selection criteria. -* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. -* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. * `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). * `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). ### replication_time -``` +```terraform replication_time { status = "Enabled" time { @@ -303,12 +306,12 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. 
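Because `replication_time` and `metrics` are only valid together, a combined destination sketch may be clearer than the two fragments alone (the bucket reference is a placeholder):

```terraform
destination {
  bucket = aws_s3_bucket.destination.arn

  replication_time {
    status = "Enabled"
    time {
      minutes = 15
    }
  }

  metrics {
    status = "Enabled"
    event_threshold {
      minutes = 15
    }
  }
}
```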
### metrics -``` +```terraform metrics { status = "Enabled" event_threshold { @@ -319,13 +322,14 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria The `source_selection_criteria` object supports the following: -``` + +```terraform source_selection_criteria { replica_modification { status = "Enabled" @@ -336,13 +340,14 @@ source_selection_criteria { } ``` + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. + * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. - ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. ### filter From 7735848b5ae4f19129a4a60cce3cd5fb4261b183 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:15:32 -0700 Subject: [PATCH 036/304] linting --- ...s3_bucket_replication_configuration.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index de512bcb34d4..ec38b8c4d2d7 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -229,7 +229,7 @@ The `replication_configuration` resource supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -268,7 +268,7 @@ existing_object_replication { ~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. -~> **NOTE:** This argument is only available with V2 replication configurations. +~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: @@ -281,7 +281,7 @@ delete_marker_replication { * `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. 
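For the `filter` arguments documented above, a short sketch combining both forms (the prefix and tag values are placeholders); a rule with both applies only to objects under the prefix that carry every listed tag:

```terraform
filter {
  prefix = "logs/"

  tags = {
    ReplicateMe = "Yes"
  }
}
```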
-### destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. @@ -306,7 +306,7 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. ### metrics @@ -322,7 +322,7 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria @@ -342,7 +342,7 @@ source_selection_criteria { ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` From 2411b0ee6e5c64b62786edf89ca909bcbe5ef261 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:41:01 -0700 Subject: [PATCH 037/304] linting/fmt --- aws/resource_aws_s3_bucket_replication_configuration.go | 2 +- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 581ac90653f7..9fed71889cfa 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -347,7 +347,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met r := replication.ReplicationConfiguration // set role if r.Role != nil && aws.StringValue(r.Role) != "" { - d.Set("role", aws.StringValue(r.Role)) + d.Set("role", r.Role) } rules := make([]interface{}, 0, len(r.Rules)) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ec38b8c4d2d7..c119ce2bcd0d 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. 
See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference +## Attributes Reference The `replication_configuration` resource supports the following: From 64400e1596f7e4021841ee01c7ae7a5840584730 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 15:00:32 -0700 Subject: [PATCH 038/304] adding missing attribute reference to documentation --- .../r/s3_bucket_replication_configuration.html.markdown | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index c119ce2bcd0d..eea1a86a844e 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Attributes Reference +## Argument Reference The `replication_configuration` resource supports the following: @@ -357,6 +357,12 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* id - Resource id is the s3 source bucket name. + ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. 
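For reference, the import command that completes the section above appears verbatim later in this series:

```sh
$ terraform import aws_s3_bucket_replication_configuration.replication bucket-name
```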
From 6e7484558916c0a90b565c252759773af5c23ad8 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:49:58 -0400 Subject: [PATCH 039/304] address linter-related errors --- ...3_bucket_replication_configuration_test.go | 14 ++++----- ...et_replication_configuration.html.markdown | 31 +++++++++---------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c5c6879cf80b..d8274ed6cf2c 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1480,8 +1480,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1631,8 +1631,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1706,8 +1706,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1715,7 +1715,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } - - `, rName, rNameDestination, rInt) } diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index eea1a86a844e..6c38b9703c08 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -114,9 +114,9 @@ resource "aws_s3_bucket" "source" { } } -aws_s3_bucket_replication_configuration replication { +resource "aws_s3_bucket_replication_configuration" "replication" { role = aws_iam_role.replication.arn - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id rules { id = "foobar" prefix = "foo" @@ -128,14 +128,13 @@ aws_s3_bucket_replication_configuration replication { } } } - ``` ### Bi-Directional Replication ```terraform -#... +# ... other configuration ... resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -166,9 +165,9 @@ resource "aws_s3_bucket" "west" { } } -aws_s3_bucket_replication_configuration "east_to_west" { +resource "aws_s3_bucket_replication_configuration" "east_to_west" { role = aws_iam_role.east_replication.arn - bucket = aws_s3_bucket.east.id + bucket = aws_s3_bucket.east.id rules { id = "foobar" prefix = "foo" @@ -181,9 +180,9 @@ aws_s3_bucket_replication_configuration "east_to_west" { } } -aws_s3_bucket_replication_configuration "west_to_east" { +resource "aws_s3_bucket_replication_configuration" "west_to_east" { role = aws_iam_role.west_replication.arn - bucket = aws_s3_bucket.west.id + bucket = aws_s3_bucket.west.id rules { id = "foobar" prefix = "foo" @@ -201,9 +200,9 @@ aws_s3_bucket_replication_configuration "west_to_east" { ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. 
To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. -```terraform +```hcl lifecycle { ignore_changes = [ replication_configuration @@ -255,7 +254,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```terraform +```hcl existing_object_replication { status = "Enabled" } @@ -272,7 +271,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```terraform +```hcl delete_marker_replication { status = "Enabled" } @@ -295,7 +294,7 @@ The `destination` object supports the following: ### replication_time -```terraform +```hcl replication_time { status = "Enabled" time { @@ -311,7 +310,7 @@ The `replication_time` object supports the following: ### metrics -```terraform +```hcl metrics { status = "Enabled" event_threshold { @@ -329,7 +328,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```terraform +```hcl source_selection_criteria { replica_modification { status = "Enabled" @@ -357,7 +356,7 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attribute Reference +## Attributes Reference In addition to all arguments above, the following attributes are exported: From 71d8119187b60ca73e0c5b9a8af31ed76bd356cb Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:52:12 -0400 Subject: [PATCH 040/304] Update CHANGELOG for #20777 --- .changelog/20777.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/20777.txt diff --git a/.changelog/20777.txt b/.changelog/20777.txt new file mode 100644 index 000000000000..75e556fa77e9 --- /dev/null +++ b/.changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_s3_bucket_replication_configuration +``` From eceb584d360ac3d5121f960dc9a61a4d2fcede0a Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 10:17:16 -0400 Subject: [PATCH 041/304] forgo syntax highlighting in short snippet code blocks in documentation --- ...bucket_replication_configuration.html.markdown | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 6c38b9703c08..877f4082b261 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,7 +133,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication ```terraform - # ... other configuration ... 
resource "aws_s3_bucket" "east" { @@ -202,7 +201,7 @@ resource "aws_s3_bucket_replication_configuration" "west_to_east" { This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. -```hcl +``` lifecycle { ignore_changes = [ replication_configuration @@ -254,7 +253,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```hcl +``` existing_object_replication { status = "Enabled" } @@ -271,7 +270,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```hcl +``` delete_marker_replication { status = "Enabled" } @@ -294,7 +293,7 @@ The `destination` object supports the following: ### replication_time -```hcl +``` replication_time { status = "Enabled" time { @@ -310,7 +309,7 @@ The `replication_time` object supports the following: ### metrics -```hcl +``` metrics { status = "Enabled" event_threshold { @@ -328,7 +327,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```hcl +``` source_selection_criteria { replica_modification { status = "Enabled" @@ -366,6 +365,6 @@ In addition to all arguments above, the following attributes are exported: S3 bucket replication configuration can be imported using the `bucket`, e.g. -``` +```sh $ terraform import aws_s3_bucket_replication_configuration.replication bucket-name ``` From f6839047e70c1eb06483216933e2849a0ea9d7c3 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 23 Sep 2021 07:46:36 -0700 Subject: [PATCH 042/304] use untyped code blocks until new resource is merged to validate --- ...ucket_replication_configuration.html.markdown | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index eea1a86a844e..ee5ce245b7bb 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -14,7 +14,7 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration -```terraform +``` provider "aws" { region = "eu-west-1" } @@ -133,7 +133,7 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -```terraform +``` #... @@ -203,7 +203,7 @@ aws_s3_bucket_replication_configuration "west_to_east" { This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
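The bi-directional example above sets `provider = west` on the west-region resources, which assumes a second provider alias; a minimal sketch of that provider pair, with placeholder regions:

```terraform
provider "aws" {
  region = "us-east-1"
}

# Referenced from resources as provider = aws.west in current Terraform syntax.
provider "aws" {
  alias  = "west"
  region = "us-west-2"
}
```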
-```terraform +``` lifecycle { ignore_changes = [ replication_configuration @@ -255,7 +255,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```terraform +``` existing_object_replication { status = "Enabled" } @@ -272,7 +272,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```terraform +``` delete_marker_replication { status = "Enabled" } @@ -295,7 +295,7 @@ The `destination` object supports the following: ### replication_time -```terraform +``` replication_time { status = "Enabled" time { @@ -311,7 +311,7 @@ The `replication_time` object supports the following: ### metrics -```terraform +``` metrics { status = "Enabled" event_threshold { @@ -329,7 +329,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```terraform +``` source_selection_criteria { replica_modification { status = "Enabled" From c2e0724680426f8bc347eaebc7a57af6fbd48138 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 24 Sep 2021 10:40:10 -0700 Subject: [PATCH 043/304] Revert key renamed in error --- aws/resource_aws_s3_bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index df3e5b460006..097ccbfb045f 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -2585,7 +2585,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } From d4b7249663631a4ee92e666f942c698b1633b1f3 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sun, 26 Sep 2021 13:35:32 -0700 Subject: [PATCH 044/304] Clean up stray merge conflict --- .../r/s3_bucket_replication_configuration.html.markdown | 6 ------ 1 file changed, 6 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index fc715c35bf13..8de6f01c14ee 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -132,14 +132,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication -<<<<<<< HEAD ``` - -#... -======= -```terraform # ... other configuration ... 
->>>>>>> eceb584d360ac3d5121f960dc9a61a4d2fcede0a resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" From a87d031a674258b3b756edb4de45f198e777e29e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sun, 26 Sep 2021 13:36:06 -0700 Subject: [PATCH 045/304] Add logic for explicit delete Include delete logic for replication configuration Adding test for delete logic --- ...aws_s3_bucket_replication_configuration.go | 13 ++++ ...3_bucket_replication_configuration_test.go | 62 +++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 9fed71889cfa..42cbce8d6efd 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -628,6 +628,19 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) + + dbri := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(bucket), + } + + _, err := s3conn.DeleteBucketReplication(dbri) + if err != nil { + return fmt.Errorf("Error removing S3 bucket replication: %s", err) + } return nil } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index d8274ed6cf2c..ff5181375500 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -906,6 +906,68 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) }) } +func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + testDeleted := func(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[r] + if ok { + return fmt.Errorf("Replication resource configuration %q should have been deleted.", r) + } + return nil + } + } + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + 
{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSS3BucketReplicationConfigBasic(rInt), + Check: resource.ComposeTestCheckFunc(testDeleted(resourceName)), + }, + }, + }) +} + func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] From b2eb2a261ee7607bb9762b1f76018f2a58b4dd45 Mon Sep 17 00:00:00 2001 From: Edgar Lopez Date: Fri, 22 Oct 2021 17:33:52 -0600 Subject: [PATCH 046/304] feat: added resource and docs for appstream directory config --- internal/provider/provider.go | 7 +- .../service/appstream/directory_config.go | 205 ++++++++++++++++++ .../appstream/directory_config_test.go | 156 +++++++++++++ internal/service/appstream/wait.go | 2 + .../appstream_directory_config.html.markdown | 53 +++++ 5 files changed, 420 insertions(+), 3 deletions(-) create mode 100644 internal/service/appstream/directory_config.go create mode 100644 internal/service/appstream/directory_config_test.go create mode 100644 website/docs/r/appstream_directory_config.html.markdown diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 3891ed4d0de0..0a6036a38022 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -782,9 +782,10 @@ func Provider() *schema.Provider { "aws_apprunner_custom_domain_association": apprunner.ResourceCustomDomainAssociation(), "aws_apprunner_service": apprunner.ResourceService(), - "aws_appstream_fleet": appstream.ResourceFleet(), - "aws_appstream_image_builder": appstream.ResourceImageBuilder(), - "aws_appstream_stack": appstream.ResourceStack(), + "aws_appstream_directory_config": appstream.ResourceDirectoryConfig(), + "aws_appstream_fleet": appstream.ResourceFleet(), + "aws_appstream_image_builder": appstream.ResourceImageBuilder(), + "aws_appstream_stack": appstream.ResourceStack(), "aws_appsync_api_key": appsync.ResourceAPIKey(), "aws_appsync_datasource": appsync.ResourceDataSource(), diff --git a/internal/service/appstream/directory_config.go b/internal/service/appstream/directory_config.go new file mode 100644 index 000000000000..eb7f1aaab548 --- /dev/null +++ b/internal/service/appstream/directory_config.go @@ -0,0 +1,205 @@ +package appstream + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/appstream" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func ResourceDirectoryConfig() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceDirectoryConfigCreate, + ReadWithoutTimeout: resourceDirectoryConfigRead, + UpdateWithoutTimeout: resourceDirectoryConfigUpdate, + DeleteWithoutTimeout: resourceDirectoryConfigDelete, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ + "created_time": { + Type: schema.TypeString, + Computed: true, + }, + "directory_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + 
"organizational_unit_distinguished_names": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(0, 2000), + }, + Set: schema.HashString, + }, + "service_account_credentials": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_name": { + Type: schema.TypeString, + Required: true, + }, + "account_password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + }, + }, + }, + }, + } +} + +func resourceDirectoryConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).AppStreamConn + input := &appstream.CreateDirectoryConfigInput{ + DirectoryName: aws.String(d.Get("directory_name").(string)), + OrganizationalUnitDistinguishedNames: flex.ExpandStringSet(d.Get("organizational_unit_distinguished_names").(*schema.Set)), + ServiceAccountCredentials: expandServiceAccountCredentials(d.Get("service_account_credentials").([]interface{})), + } + + var err error + var output *appstream.CreateDirectoryConfigOutput + err = resource.RetryContext(ctx, directoryConfigTimeout, func() *resource.RetryError { + output, err = conn.CreateDirectoryConfigWithContext(ctx, input) + if err != nil { + if tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + + return nil + }) + + if tfresource.TimedOut(err) { + output, err = conn.CreateDirectoryConfigWithContext(ctx, input) + } + if err != nil { + return diag.FromErr(fmt.Errorf("error creating Appstream DirectoryConfig (%s): %w", d.Id(), err)) + } + + d.SetId(aws.StringValue(output.DirectoryConfig.DirectoryName)) + + return resourceDirectoryConfigRead(ctx, d, meta) +} + +func resourceDirectoryConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).AppStreamConn + + resp, err := conn.DescribeDirectoryConfigsWithContext(ctx, &appstream.DescribeDirectoryConfigsInput{DirectoryNames: []*string{aws.String(d.Id())}}) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] Appstream DirectoryConfig (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return diag.FromErr(fmt.Errorf("error reading Appstream DirectoryConfig (%s): %w", d.Id(), err)) + } + + if len(resp.DirectoryConfigs) == 0 { + return diag.FromErr(fmt.Errorf("error reading Appstream DirectoryConfig (%s): %s", d.Id(), "empty response")) + } + + if len(resp.DirectoryConfigs) > 1 { + return diag.FromErr(fmt.Errorf("error reading Appstream DirectoryConfig (%s): %s", d.Id(), "multiple directories config found")) + } + + directoryConfig := resp.DirectoryConfigs[0] + + d.Set("created_time", aws.TimeValue(directoryConfig.CreatedTime).Format(time.RFC3339)) + d.Set("directory_name", directoryConfig.DirectoryName) + d.Set("organizational_unit_distinguished_names", flex.FlattenStringSet(directoryConfig.OrganizationalUnitDistinguishedNames)) + + if err = d.Set("service_account_credentials", flattenServiceAccountCredentials(directoryConfig.ServiceAccountCredentials, d)); err != nil { + return diag.FromErr(fmt.Errorf("error setting `%s` for AppStream DirectoryConfig (%s): %w", "service_account_credentials", d.Id(), err)) + } + + return nil +} + +func resourceDirectoryConfigUpdate(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).AppStreamConn + input := &appstream.UpdateDirectoryConfigInput{ + DirectoryName: aws.String(d.Id()), + } + + if d.HasChange("organizational_unit_distinguished_names") { + input.OrganizationalUnitDistinguishedNames = flex.ExpandStringSet(d.Get("organizational_unit_distinguished_names").(*schema.Set)) + } + + if d.HasChange("service_account_credentials") { + input.ServiceAccountCredentials = expandServiceAccountCredentials(d.Get("service_account_credentials").([]interface{})) + } + + _, err := conn.UpdateDirectoryConfigWithContext(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("error updating Appstream DirectoryConfig (%s): %w", d.Id(), err)) + } + + return resourceDirectoryConfigRead(ctx, d, meta) +} + +func resourceDirectoryConfigDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).AppStreamConn + + _, err := conn.DeleteDirectoryConfigWithContext(ctx, &appstream.DeleteDirectoryConfigInput{ + DirectoryName: aws.String(d.Id()), + }) + + if err != nil { + if tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { + return nil + } + return diag.FromErr(fmt.Errorf("error deleting Appstream DirectoryConfig (%s): %w", d.Id(), err)) + } + return nil +} + +func expandServiceAccountCredentials(tfList []interface{}) *appstream.ServiceAccountCredentials { + if len(tfList) == 0 { + return nil + } + + attr := tfList[0].(map[string]interface{}) + + apiObject := &appstream.ServiceAccountCredentials{ + AccountName: aws.String(attr["account_name"].(string)), + AccountPassword: aws.String(attr["account_password"].(string)), + } + + return apiObject +} + +func flattenServiceAccountCredentials(apiObject *appstream.ServiceAccountCredentials, d *schema.ResourceData) []interface{} { + if apiObject == nil { + return nil + } + + tfList := map[string]interface{}{} + tfList["account_name"] = aws.StringValue(apiObject.AccountName) + tfList["account_password"] = d.Get("service_account_credentials.0.account_password").(string) + + return []interface{}{tfList} +} diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go new file mode 100644 index 000000000000..bedd6128e274 --- /dev/null +++ b/internal/service/appstream/directory_config_test.go @@ -0,0 +1,156 @@ +package appstream_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/appstream" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfappstream "github.com/hashicorp/terraform-provider-aws/internal/service/appstream" +) + +func TestAccAppStreamDirectoryConfig_basic(t *testing.T) { + var directoryOutput appstream.DirectoryConfig + resourceName := "aws_appstream_directory_config.test" + rName := acctest.RandomDomainName() + rUserName := fmt.Sprintf("%s\\%s", rName, sdkacctest.RandString(10)) + rPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rUserNameUpdated := fmt.Sprintf("%s\\%s", rName, sdkacctest.RandString(10)) + rPasswordUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() {
+ acctest.PreCheck(t)
+ acctest.PreCheckHasIAMRole(t, "AmazonAppStreamServiceAccess")
+ },
+ ProviderFactories: acctest.ProviderFactories,
+ CheckDestroy: testAccCheckDirectoryConfigDestroy,
+ ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccDirectoryConfigConfig(rName, rUserName, rPassword),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDirectoryConfigExists(resourceName, &directoryOutput),
+ resource.TestCheckResourceAttr(resourceName, "directory_name", rName),
+ acctest.CheckResourceAttrRFC3339(resourceName, "created_time"),
+ ),
+ },
+ {
+ Config: testAccDirectoryConfigConfig(rName, rUserNameUpdated, rPasswordUpdated),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDirectoryConfigExists(resourceName, &directoryOutput),
+ resource.TestCheckResourceAttr(resourceName, "directory_name", rName),
+ acctest.CheckResourceAttrRFC3339(resourceName, "created_time"),
+ ),
+ },
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"service_account_credentials.0.account_password"},
+ },
+ },
+ })
+}
+
+func TestAccAppStreamDirectoryConfig_disappears(t *testing.T) {
+ var directoryOutput appstream.DirectoryConfig
+ resourceName := "aws_appstream_directory_config.test"
+ rName := acctest.RandomDomainName()
+ rUserName := fmt.Sprintf("%s\\%s", rName, sdkacctest.RandString(10))
+ rPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() {
+ acctest.PreCheck(t)
+ acctest.PreCheckHasIAMRole(t, "AmazonAppStreamServiceAccess")
+ },
+ ProviderFactories: acctest.ProviderFactories,
+ CheckDestroy: testAccCheckDirectoryConfigDestroy,
+ ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccDirectoryConfigConfig(rName, rUserName, rPassword),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDirectoryConfigExists(resourceName, &directoryOutput),
+ acctest.CheckResourceDisappears(acctest.Provider, tfappstream.ResourceDirectoryConfig(), resourceName),
+ ),
+ ExpectNonEmptyPlan: true,
+ },
+ },
+ })
+}
+func testAccCheckDirectoryConfigExists(resourceName string, appStreamDirectoryConfig *appstream.DirectoryConfig) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[resourceName]
+ if !ok {
+ return fmt.Errorf("not found: %s", resourceName)
+ }
+
+ conn := acctest.Provider.Meta().(*conns.AWSClient).AppStreamConn
+ resp, err := conn.DescribeDirectoryConfigs(&appstream.DescribeDirectoryConfigsInput{DirectoryNames: []*string{aws.String(rs.Primary.ID)}})
+
+ if err != nil {
+ return err
+ }
+
+ if resp == nil || len(resp.DirectoryConfigs) == 0 {
+ return fmt.Errorf("appstream directory config %q does not exist", rs.Primary.ID)
+ }
+
+ *appStreamDirectoryConfig = *resp.DirectoryConfigs[0]
+
+ return nil
+ }
+}
+
+func testAccCheckDirectoryConfigDestroy(s *terraform.State) error {
+ conn := acctest.Provider.Meta().(*conns.AWSClient).AppStreamConn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_appstream_directory_config" {
+ continue
+ }
+
+ resp, err := conn.DescribeDirectoryConfigs(&appstream.DescribeDirectoryConfigsInput{DirectoryNames: []*string{aws.String(rs.Primary.ID)}})
+
+ if tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) {
+ continue
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if resp 
!= nil && len(resp.DirectoryConfigs) > 0 { + return fmt.Errorf("appstream directory config %q still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccDirectoryConfigConfig(name, userName, password string) string { + return fmt.Sprintf(` +data "aws_organizations_organization" "test" {} + +data "aws_organizations_organizational_units" "test" { + parent_id = data.aws_organizations_organization.test.roots[0].id +} + +resource "aws_appstream_directory_config" "test" { + directory_name = %[1]q + organizational_unit_distinguished_names = data.aws_organizations_organizational_units.test.children.*.id + service_account_credentials{ + account_name = %[2]q + account_password = %[3]q + } +} +`, name, userName, password) +} diff --git a/internal/service/appstream/wait.go b/internal/service/appstream/wait.go index b0978861b26f..18226ca77f3f 100644 --- a/internal/service/appstream/wait.go +++ b/internal/service/appstream/wait.go @@ -23,6 +23,8 @@ const ( // imageBuilderStateTimeout Maximum amount of time to wait for the statusImageBuilderState to be RUNNING // or for the ImageBuilder to be deleted imageBuilderStateTimeout = 60 * time.Minute + // directoryConfigTimeout Maximum amount of time to wait for DirectoryConfig operation eventual consistency + directoryConfigTimeout = 4 * time.Minute ) // waitStackStateDeleted waits for a deleted stack diff --git a/website/docs/r/appstream_directory_config.html.markdown b/website/docs/r/appstream_directory_config.html.markdown new file mode 100644 index 000000000000..52911f705b0e --- /dev/null +++ b/website/docs/r/appstream_directory_config.html.markdown @@ -0,0 +1,53 @@ +--- +subcategory: "AppStream" +layout: "aws" +page_title: "AWS: aws_appstream_directory_config" +description: |- + Provides an AppStream Directory Config +--- + +# Resource: aws_appstream_directory_config + +Provides an AppStream Directory Config. + +## Example Usage + +```terraform +resource "aws_appstream_directory_config" "example" { + directory_name = "NAME OF DIRECTORY CONFIG" + organizational_unit_distinguished_names = ["DISTINGUISHED NAME"] + service_account_credentials { + account_name = "NAME OF ACCOUNT" + account_password = "PASSWORD OF ACCOUNT" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `directory_name` - (Required) Fully qualified name of the directory. +* `organizational_unit_distinguished_names` - (Required) Distinguished names of the organizational units for computer accounts. +* `service_account_credentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See below. + +### `service_account_credentials` + +* `account_name` - (Required) User name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified. +* `account_password` - (Required) Password for the account. + + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Unique identifier (ID) of the appstream directory config. +* `created_time` - Date and time, in UTC and extended RFC 3339 format, when the directory config was created. 
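+
+A more concrete sketch of the arguments above, assuming a hypothetical `corp.example.com` Active Directory domain and an existing service account (every name and the variable below are illustrative placeholders, not values defined by this resource):
+
+```terraform
+variable "service_account_password" {
+  type      = string
+  sensitive = true
+}
+
+resource "aws_appstream_directory_config" "example" {
+  directory_name                          = "corp.example.com"
+  organizational_unit_distinguished_names = ["OU=AppStream,DC=corp,DC=example,DC=com"]
+
+  service_account_credentials {
+    account_name     = "corp\\appstream-svc" # DOMAIN\user form; hypothetical account
+    account_password = var.service_account_password
+  }
+}
+
+# created_time is exported once the directory config exists.
+output "appstream_directory_config_created" {
+  value = aws_appstream_directory_config.example.created_time
+}
+```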
+ +## Import + +`aws_appstream_directory_config` can be imported using the id, e.g., + +``` +$ terraform import aws_appstream_directory_config.example directoryNameExample +``` From ef817614ed4ab1cc66b8a55af98d2dc6372a8846 Mon Sep 17 00:00:00 2001 From: Edgar Lopez Date: Fri, 22 Oct 2021 17:37:09 -0600 Subject: [PATCH 047/304] refactor --- internal/service/appstream/directory_config_test.go | 2 ++ website/docs/r/appstream_directory_config.html.markdown | 1 + 2 files changed, 3 insertions(+) diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index bedd6128e274..d3acdc2ba40f 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -86,6 +86,7 @@ func TestAccAppStreamDirectoryConfig_disappears(t *testing.T) { }, }) } + func testAccCheckDirectoryConfigExists(resourceName string, appStreamDirectoryConfig *appstream.DirectoryConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -147,6 +148,7 @@ data "aws_organizations_organizational_units" "test" { resource "aws_appstream_directory_config" "test" { directory_name = %[1]q organizational_unit_distinguished_names = data.aws_organizations_organizational_units.test.children.*.id + service_account_credentials{ account_name = %[2]q account_password = %[3]q diff --git a/website/docs/r/appstream_directory_config.html.markdown b/website/docs/r/appstream_directory_config.html.markdown index 52911f705b0e..19494cdd5c01 100644 --- a/website/docs/r/appstream_directory_config.html.markdown +++ b/website/docs/r/appstream_directory_config.html.markdown @@ -16,6 +16,7 @@ Provides an AppStream Directory Config. resource "aws_appstream_directory_config" "example" { directory_name = "NAME OF DIRECTORY CONFIG" organizational_unit_distinguished_names = ["DISTINGUISHED NAME"] + service_account_credentials { account_name = "NAME OF ACCOUNT" account_password = "PASSWORD OF ACCOUNT" From deb8a56532d0154335cd1af87010386c09bcaeb4 Mon Sep 17 00:00:00 2001 From: Edgar Lopez Date: Tue, 26 Oct 2021 13:16:36 -0600 Subject: [PATCH 048/304] added changelog --- .changelog/21505.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21505.txt diff --git a/.changelog/21505.txt b/.changelog/21505.txt new file mode 100644 index 000000000000..1cbb5c33c435 --- /dev/null +++ b/.changelog/21505.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_appstream_directory_config +``` \ No newline at end of file From b4d5132dd805cf7b072245e4fabc9be7d567f437 Mon Sep 17 00:00:00 2001 From: Edgar Lopez Date: Tue, 26 Oct 2021 13:32:21 -0600 Subject: [PATCH 049/304] terrafmt --- website/docs/r/appstream_directory_config.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/appstream_directory_config.html.markdown b/website/docs/r/appstream_directory_config.html.markdown index 19494cdd5c01..47fcc9d5a309 100644 --- a/website/docs/r/appstream_directory_config.html.markdown +++ b/website/docs/r/appstream_directory_config.html.markdown @@ -16,7 +16,7 @@ Provides an AppStream Directory Config. 
resource "aws_appstream_directory_config" "example" { directory_name = "NAME OF DIRECTORY CONFIG" organizational_unit_distinguished_names = ["DISTINGUISHED NAME"] - + service_account_credentials { account_name = "NAME OF ACCOUNT" account_password = "PASSWORD OF ACCOUNT" From aac9196dfedfb518ef3a309da1c1bca2aa45f4bf Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Sun, 31 Oct 2021 22:03:19 +0000 Subject: [PATCH 050/304] Added securityhub finding aggregator resource --- internal/provider/provider.go | 1 + .../service/securityhub/finding_aggregator.go | 176 ++++++++++++++++++ .../securityhub/finding_aggregator_test.go | 166 +++++++++++++++++ .../r/securityhub_finding_aggregator.markdown | 76 ++++++++ 4 files changed, 419 insertions(+) create mode 100644 internal/service/securityhub/finding_aggregator.go create mode 100644 internal/service/securityhub/finding_aggregator_test.go create mode 100644 website/docs/r/securityhub_finding_aggregator.markdown diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 7471764fb8fd..921b8f9f75ef 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1519,6 +1519,7 @@ func Provider() *schema.Provider { "aws_securityhub_product_subscription": securityhub.ResourceProductSubscription(), "aws_securityhub_standards_control": securityhub.ResourceStandardsControl(), "aws_securityhub_standards_subscription": securityhub.ResourceStandardsSubscription(), + "aws_securityhub_finding_aggregator": securityhub.ResourceFindingAggregator(), "aws_serverlessapplicationrepository_cloudformation_stack": serverlessapprepo.ResourceCloudFormationStack(), diff --git a/internal/service/securityhub/finding_aggregator.go b/internal/service/securityhub/finding_aggregator.go new file mode 100644 index 000000000000..a8e93810f9b3 --- /dev/null +++ b/internal/service/securityhub/finding_aggregator.go @@ -0,0 +1,176 @@ +package securityhub + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" +) + +const ( + allRegions = "ALL_REGIONS" + allRegionsExceptSpecified = "ALL_REGIONS_EXCEPT_SPECIFIED" + specifiedRegions = "SPECIFIED_REGIONS" +) + +func ResourceFindingAggregator() *schema.Resource { + return &schema.Resource{ + Create: resourceFindingAggregatorCreate, + Read: resourceFindingAggregatorRead, + Update: resourceFindingAggregatorUpdate, + Delete: resourceFindingAggregatorDelete, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "linking_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + allRegions, + allRegionsExceptSpecified, + specifiedRegions, + }, false), + }, + "specified_regions": { + Type: schema.TypeSet, + MinItems: 1, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceFindingAggregatorCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).SecurityHubConn + + linkingMode := d.Get("linking_mode").(string) + + req := &securityhub.CreateFindingAggregatorInput{ + RegionLinkingMode: &linkingMode, + } + + if v, ok := d.GetOk("specified_regions"); ok && (linkingMode == 
allRegionsExceptSpecified || linkingMode == specifiedRegions) {
+ req.Regions = flex.ExpandStringSet(v.(*schema.Set))
+ }
+
+ log.Printf("[DEBUG] Creating Security Hub finding aggregator")
+
+ resp, err := conn.CreateFindingAggregator(req)
+
+ if err != nil {
+ return fmt.Errorf("Error creating finding aggregator for Security Hub: %s", err)
+ }
+
+ d.SetId(aws.StringValue(resp.FindingAggregatorArn))
+
+ return resourceFindingAggregatorRead(d, meta)
+}
+
+func resourceFindingAggregatorRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*conns.AWSClient).SecurityHubConn
+
+ aggregatorArn := d.Id()
+
+ log.Printf("[DEBUG] Reading Security Hub finding aggregator to find %s", aggregatorArn)
+
+ aggregator, err := FindingAggregatorCheckExists(conn, aggregatorArn)
+
+ if err != nil {
+ return fmt.Errorf("Error reading Security Hub finding aggregator to find %s: %s", aggregatorArn, err)
+ }
+
+ if aggregator == nil {
+ log.Printf("[WARN] Security Hub finding aggregator (%s) not found, removing from state", aggregatorArn)
+ d.SetId("")
+ return nil
+ }
+
+ d.Set("linking_mode", aggregator.RegionLinkingMode)
+
+ if len(aggregator.Regions) > 0 {
+ d.Set("specified_regions", flex.FlattenStringList(aggregator.Regions))
+ }
+
+ return nil
+}
+
+func FindingAggregatorCheckExists(conn *securityhub.SecurityHub, findingAggregatorArn string) (*securityhub.GetFindingAggregatorOutput, error) {
+ input := &securityhub.ListFindingAggregatorsInput{}
+
+ var found *securityhub.GetFindingAggregatorOutput
+ var getErr error
+
+ // Track the nested Get error separately; assigning the pager's return value
+ // to the same variable would silently overwrite it.
+ err := conn.ListFindingAggregatorsPages(input, func(page *securityhub.ListFindingAggregatorsOutput, lastPage bool) bool {
+ for _, aggregator := range page.FindingAggregators {
+ if aws.StringValue(aggregator.FindingAggregatorArn) == findingAggregatorArn {
+ getInput := &securityhub.GetFindingAggregatorInput{
+ FindingAggregatorArn: &findingAggregatorArn,
+ }
+ found, getErr = conn.GetFindingAggregator(getInput)
+ return false
+ }
+ }
+ return !lastPage
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if getErr != nil {
+ return nil, getErr
+ }
+
+ return found, nil
+}
+
+func resourceFindingAggregatorUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*conns.AWSClient).SecurityHubConn
+
+ aggregatorArn := d.Id()
+
+ linkingMode := d.Get("linking_mode").(string)
+
+ req := &securityhub.UpdateFindingAggregatorInput{
+ FindingAggregatorArn: &aggregatorArn,
+ RegionLinkingMode: &linkingMode,
+ }
+
+ if v, ok := d.GetOk("specified_regions"); ok && (linkingMode == allRegionsExceptSpecified || linkingMode == specifiedRegions) {
+ req.Regions = flex.ExpandStringSet(v.(*schema.Set))
+ }
+
+ resp, err := conn.UpdateFindingAggregator(req)
+
+ if err != nil {
+ return fmt.Errorf("Error updating Security Hub finding aggregator (%s): %w", aggregatorArn, err)
+ }
+
+ d.SetId(aws.StringValue(resp.FindingAggregatorArn))
+
+ return resourceFindingAggregatorRead(d, meta)
+}
+
+func resourceFindingAggregatorDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*conns.AWSClient).SecurityHubConn
+
+ aggregatorArn := d.Id()
+
+ log.Printf("[DEBUG] Disabling Security Hub finding aggregator %s", aggregatorArn)
+
+ _, err := conn.DeleteFindingAggregator(&securityhub.DeleteFindingAggregatorInput{
+ FindingAggregatorArn: &aggregatorArn,
+ })
+
+ if err != nil {
+ return fmt.Errorf("Error disabling Security Hub finding aggregator %s: %s", aggregatorArn, err)
+ }
+
+ return nil
+}
diff --git a/internal/service/securityhub/finding_aggregator_test.go b/internal/service/securityhub/finding_aggregator_test.go new 
file mode 100644 index 000000000000..07d3accbf225 --- /dev/null +++ b/internal/service/securityhub/finding_aggregator_test.go @@ -0,0 +1,166 @@ +package securityhub_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfsecurityhub "github.com/hashicorp/terraform-provider-aws/internal/service/securityhub" +) + +func TestAccFindingAggregator_basic(t *testing.T) { + resourceName := "aws_securityhub_finding_aggregator.test_aggregator" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, securityhub.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFindingAggregatorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFindingAggregatorAllRegionsConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFindingAggregatorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "linking_mode", "ALL_REGIONS"), + resource.TestCheckNoResourceAttr(resourceName, "specified_regions"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccFindingAggregatorSpecifiedRegionsConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFindingAggregatorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "linking_mode", "SPECIFIED_REGIONS"), + resource.TestCheckResourceAttr(resourceName, "specified_regions.#", "3"), + ), + }, + { + Config: testAccFindingAggregatorAllRegionsExceptSpecifiedConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFindingAggregatorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "linking_mode", "ALL_REGIONS_EXCEPT_SPECIFIED"), + resource.TestCheckResourceAttr(resourceName, "specified_regions.#", "2"), + ), + }, + }, + }) +} + +func TestAccFindingAggregator_disappears(t *testing.T) { + resourceName := "aws_securityhub_finding_aggregator.test_aggregator" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, securityhub.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFindingAggregatorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFindingAggregatorAllRegionsConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFindingAggregatorExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfsecurityhub.ResourceFindingAggregator(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckFindingAggregatorExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Hub finding aggregator ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SecurityHubConn + + _, err := conn.GetFindingAggregator(&securityhub.GetFindingAggregatorInput{ + FindingAggregatorArn: &rs.Primary.ID, + }) + + if err != nil { + return fmt.Errorf("Failed to get finding aggregator: %s", err) + } + + return nil + } +} + +func testAccCheckFindingAggregatorDestroy(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).SecurityHubConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_securityhub_finding_aggregator" { + continue + } + + _, err := conn.GetFindingAggregator(&securityhub.GetFindingAggregatorInput{ + FindingAggregatorArn: &rs.Primary.ID, + }) + + if tfawserr.ErrMessageContains(err, securityhub.ErrCodeInvalidAccessException, "not subscribed to AWS Security Hub") { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("Security Hub Finding Aggregator %s still exists", rs.Primary.ID) + } + + return nil +} + +func testAccFindingAggregatorAllRegionsConfig() string { + return ` +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_finding_aggregator" "test_aggregator" { + linking_mode = "ALL_REGIONS" + + depends_on = [aws_securityhub_account.example] +} +` +} + +func testAccFindingAggregatorSpecifiedRegionsConfig() string { + return ` +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_finding_aggregator" "test_aggregator" { + linking_mode = "SPECIFIED_REGIONS" + specified_regions = ["eu-west-2", "eu-west-1", "us-east-1"] + + depends_on = [aws_securityhub_account.example] +} +` +} + +func testAccFindingAggregatorAllRegionsExceptSpecifiedConfig() string { + return ` +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_finding_aggregator" "test_aggregator" { + linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED" + specified_regions = ["eu-west-2", "eu-west-1"] + + depends_on = [aws_securityhub_account.example] +} +` +} diff --git a/website/docs/r/securityhub_finding_aggregator.markdown b/website/docs/r/securityhub_finding_aggregator.markdown new file mode 100644 index 000000000000..c415d8f77ad8 --- /dev/null +++ b/website/docs/r/securityhub_finding_aggregator.markdown @@ -0,0 +1,76 @@ +--- +subcategory: "Security Hub" +layout: "aws" +page_title: "AWS: aws_securityhub_finding_aggregator" +description: |- + Manages a Security Hub finding aggregator +--- + +# Resource: aws_securityhub_finding_aggregator + +Manages a Security Hub finding aggregator. Security Hub needs to be enabled in a region in order for the aggregator to pull through findings. + +## All Regions Usage + +The following example will enable the aggregator for every region. + +```terraform +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_finding_aggregator" "example" { + linking_mode = "ALL_REGIONS" + + depends_on = [aws_securityhub_account.example] +} +``` + +## All Regions Except Specified Regions Usage + +The following example will enable the aggregator for every region except those specified in `specified_regions`. + +```terraform +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_finding_aggregator" "example" { + linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED" + specified_regions = ["eu-west-1", "eu-west-2] + + depends_on = [aws_securityhub_account.example] +} +``` + +## Specified Regions Usage + +The following example will enable the aggregator for every region specified in `specified_regions`. 
+ +```terraform +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_finding_aggregator" "example" { + linking_mode = "SPECIFIED_REGIONS" + specified_regions = ["eu-west-1", "eu-west-2] + + depends_on = [aws_securityhub_account.example] +} +``` + +## Argument Reference + +The following arguments are supported: + +- `linking_mode` - (Required) Indicates whether to aggregate findings from all of the available Regions or from a specified list. The options are `ALL_REGIONS`, `ALL_REGIONS_EXCEPT_SPECIFIED` or `SPECIFIED_REGIONS`. When `ALL_REGIONS` or `ALL_REGIONS_EXCEPT_SPECIFIED` are used, Security Hub will automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. +- `specified_regions` - (Optional) List of regions to include or exclude (required if `linking_mode` is set to `ALL_REGIONS_EXCEPT_SPECIFIED` or `SPECIFIED_REGIONS`) + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +- `arn` - Amazon Resource Name (ARN) of the Security Hub finding aggregator. + +## Import + +An existing Security Hub finding aggregator can be imported using the `arn`, e.g., + +``` +$ terraform import aws_securityhub_finding_aggregator.example arn:aws:securityhub:eu-west-1:123456789098:finding-aggregator/abcd1234-abcd-1234-1234-abcdef123456 +``` From 152d710ec9d3f1edac4541bce396fab46f897eb1 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Sun, 31 Oct 2021 22:20:20 +0000 Subject: [PATCH 051/304] updated tests --- internal/service/securityhub/finding_aggregator_test.go | 4 ++-- internal/service/securityhub/securityhub_test.go | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/service/securityhub/finding_aggregator_test.go b/internal/service/securityhub/finding_aggregator_test.go index 07d3accbf225..c3cf0af840a6 100644 --- a/internal/service/securityhub/finding_aggregator_test.go +++ b/internal/service/securityhub/finding_aggregator_test.go @@ -13,7 +13,7 @@ import ( tfsecurityhub "github.com/hashicorp/terraform-provider-aws/internal/service/securityhub" ) -func TestAccFindingAggregator_basic(t *testing.T) { +func testAccFindingAggregator_basic(t *testing.T) { resourceName := "aws_securityhub_finding_aggregator.test_aggregator" resource.Test(t, resource.TestCase{ @@ -55,7 +55,7 @@ func TestAccFindingAggregator_basic(t *testing.T) { }) } -func TestAccFindingAggregator_disappears(t *testing.T) { +func testAccFindingAggregator_disappears(t *testing.T) { resourceName := "aws_securityhub_finding_aggregator.test_aggregator" resource.Test(t, resource.TestCase{ diff --git a/internal/service/securityhub/securityhub_test.go b/internal/service/securityhub/securityhub_test.go index 969997b56e9b..b6b7aacaeb0b 100644 --- a/internal/service/securityhub/securityhub_test.go +++ b/internal/service/securityhub/securityhub_test.go @@ -55,6 +55,10 @@ func TestAccSecurityHub_serial(t *testing.T) { "basic": testAccStandardsSubscription_basic, "disappears": testAccStandardsSubscription_disappears, }, + "FindingAggregator": { + "basic": testAccFindingAggregator_basic, + "disappears": testAccFindingAggregator_disappears, + }, } for group, m := range testCases { From 5757dfd7eb1f6f7bc8beac7cb07c4a6fb429a636 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Sun, 31 Oct 2021 22:35:34 +0000 Subject: [PATCH 052/304] added changelog --- .changelog/21560.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21560.txt diff --git a/.changelog/21560.txt b/.changelog/21560.txt 
new file mode 100644 index 000000000000..d4d4d07906d6 --- /dev/null +++ b/.changelog/21560.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_securityhub_finding_aggregator +``` \ No newline at end of file From 22d4a796f58721ad1203cda2e077b84c9b72635b Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 1 Nov 2021 14:41:16 +0000 Subject: [PATCH 053/304] linting fixes --- .../securityhub/finding_aggregator_test.go | 13 +++++++------ .../r/securityhub_finding_aggregator.markdown | 16 ++++++++-------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/internal/service/securityhub/finding_aggregator_test.go b/internal/service/securityhub/finding_aggregator_test.go index c3cf0af840a6..a06ef5cec560 100644 --- a/internal/service/securityhub/finding_aggregator_test.go +++ b/internal/service/securityhub/finding_aggregator_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/securityhub" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -140,27 +141,27 @@ resource "aws_securityhub_finding_aggregator" "test_aggregator" { } func testAccFindingAggregatorSpecifiedRegionsConfig() string { - return ` + return fmt.Sprintf(` resource "aws_securityhub_account" "example" {} resource "aws_securityhub_finding_aggregator" "test_aggregator" { linking_mode = "SPECIFIED_REGIONS" - specified_regions = ["eu-west-2", "eu-west-1", "us-east-1"] + specified_regions = ["%s", "%s", "%s"] depends_on = [aws_securityhub_account.example] } -` +`, endpoints.EuWest1RegionID, endpoints.EuWest2RegionID, endpoints.UsEast1RegionID) } func testAccFindingAggregatorAllRegionsExceptSpecifiedConfig() string { - return ` + return fmt.Sprintf(` resource "aws_securityhub_account" "example" {} resource "aws_securityhub_finding_aggregator" "test_aggregator" { linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED" - specified_regions = ["eu-west-2", "eu-west-1"] + specified_regions = ["%s", "%s"] depends_on = [aws_securityhub_account.example] } -` +`, endpoints.EuWest1RegionID, endpoints.EuWest2RegionID) } diff --git a/website/docs/r/securityhub_finding_aggregator.markdown b/website/docs/r/securityhub_finding_aggregator.markdown index c415d8f77ad8..503797664bbd 100644 --- a/website/docs/r/securityhub_finding_aggregator.markdown +++ b/website/docs/r/securityhub_finding_aggregator.markdown @@ -18,9 +18,9 @@ The following example will enable the aggregator for every region. 
resource "aws_securityhub_account" "example" {} resource "aws_securityhub_finding_aggregator" "example" { - linking_mode = "ALL_REGIONS" + linking_mode = "ALL_REGIONS" - depends_on = [aws_securityhub_account.example] + depends_on = [aws_securityhub_account.example] } ``` @@ -32,10 +32,10 @@ The following example will enable the aggregator for every region except those s resource "aws_securityhub_account" "example" {} resource "aws_securityhub_finding_aggregator" "example" { - linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED" - specified_regions = ["eu-west-1", "eu-west-2] + linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED" + specified_regions = ["eu-west-1", "eu-west-2"] - depends_on = [aws_securityhub_account.example] + depends_on = [aws_securityhub_account.example] } ``` @@ -47,10 +47,10 @@ The following example will enable the aggregator for every region specified in ` resource "aws_securityhub_account" "example" {} resource "aws_securityhub_finding_aggregator" "example" { - linking_mode = "SPECIFIED_REGIONS" - specified_regions = ["eu-west-1", "eu-west-2] + linking_mode = "SPECIFIED_REGIONS" + specified_regions = ["eu-west-1", "eu-west-2"] - depends_on = [aws_securityhub_account.example] + depends_on = [aws_securityhub_account.example] } ``` From 84904b06ab7a43af20e97dd342b2f4b665d8d1d3 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:55:05 -0700 Subject: [PATCH 054/304] init setup for replication configuration resource Blocking out general structure for new independent resource for managing the s3 bucket replication configuration settings Pulling over logic from resource s3 bucket to start with --- internal/service/s3/bucket.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index f143f556cb3d..5bc4db22b414 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -818,7 +818,7 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("replication_configuration") { - if err := resourceBucketReplicationConfigurationUpdate(conn, d); err != nil { + if err := resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn, d); err != nil { return err } } @@ -2033,7 +2033,7 @@ func resourceBucketObjectLockConfigurationUpdate(conn *s3.S3, d *schema.Resource return nil } -func resourceBucketReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { +func resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { bucket := d.Get("bucket").(string) replicationConfiguration := d.Get("replication_configuration").([]interface{}) From da4da50713b92527668383df4460d708b077ad86 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 09:57:33 -0700 Subject: [PATCH 055/304] adding new resource for replication configurations --- aws/provider.go | 1702 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1702 insertions(+) create mode 100644 aws/provider.go diff --git a/aws/provider.go b/aws/provider.go new file mode 100644 index 000000000000..5641b36adb4e --- /dev/null +++ b/aws/provider.go @@ -0,0 +1,1702 @@ +package aws + +import ( + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/mutexkv" +) + +// Provider 
returns a *schema.Provider. +func Provider() *schema.Provider { + // TODO: Move the validation to this, requires conditional schemas + // TODO: Move the configuration to this, requires validation + + // The actual provider + provider := &schema.Provider{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["access_key"], + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["secret_key"], + }, + + "profile": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["profile"], + }, + + "assume_role": assumeRoleSchema(), + + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["shared_credentials_file"], + }, + + "token": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["token"], + }, + + "region": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", + }, nil), + Description: descriptions["region"], + InputDefault: "us-east-1", // lintignore:AWSAT003 + }, + + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Default: 25, + Description: descriptions["max_retries"], + }, + + "allowed_account_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"forbidden_account_ids"}, + Set: schema.HashString, + }, + + "forbidden_account_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"allowed_account_ids"}, + Set: schema.HashString, + }, + + "default_tags": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Configuration block with settings to default resource tags across all resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Resource tags to default across all resources", + }, + }, + }, + }, + + "endpoints": endpointsSchema(), + + "ignore_tags": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Configuration block with settings to ignore resource tags across all resources.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "keys": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Resource tag keys to ignore across all resources.", + }, + "key_prefixes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Resource tag key prefixes to ignore across all resources.", + }, + }, + }, + }, + + "insecure": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["insecure"], + }, + + "skip_credentials_validation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_credentials_validation"], + }, + + "skip_get_ec2_platforms": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_get_ec2_platforms"], + }, + + "skip_region_validation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_region_validation"], + }, + + "skip_requesting_account_id": { + Type: 
schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_requesting_account_id"], + }, + + "skip_metadata_api_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_metadata_api_check"], + }, + + "s3_force_path_style": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["s3_force_path_style"], + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "aws_acm_certificate": dataSourceAwsAcmCertificate(), + "aws_acmpca_certificate_authority": dataSourceAwsAcmpcaCertificateAuthority(), + "aws_acmpca_certificate": dataSourceAwsAcmpcaCertificate(), + "aws_ami": dataSourceAwsAmi(), + "aws_ami_ids": dataSourceAwsAmiIds(), + "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), + "aws_api_gateway_domain_name": dataSourceAwsApiGatewayDomainName(), + "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), + "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), + "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), + "aws_apigatewayv2_api": dataSourceAwsApiGatewayV2Api(), + "aws_apigatewayv2_apis": dataSourceAwsApiGatewayV2Apis(), + "aws_appmesh_mesh": dataSourceAwsAppmeshMesh(), + "aws_appmesh_virtual_service": dataSourceAwsAppmeshVirtualService(), + "aws_arn": dataSourceAwsArn(), + "aws_autoscaling_group": dataSourceAwsAutoscalingGroup(), + "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), + "aws_availability_zone": dataSourceAwsAvailabilityZone(), + "aws_availability_zones": dataSourceAwsAvailabilityZones(), + "aws_backup_plan": dataSourceAwsBackupPlan(), + "aws_backup_selection": dataSourceAwsBackupSelection(), + "aws_backup_vault": dataSourceAwsBackupVault(), + "aws_batch_compute_environment": dataSourceAwsBatchComputeEnvironment(), + "aws_batch_job_queue": dataSourceAwsBatchJobQueue(), + "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), + "aws_caller_identity": dataSourceAwsCallerIdentity(), + "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), + "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), + "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), + "aws_cloudformation_type": dataSourceAwsCloudFormationType(), + "aws_cloudfront_cache_policy": dataSourceAwsCloudFrontCachePolicy(), + "aws_cloudfront_distribution": dataSourceAwsCloudFrontDistribution(), + "aws_cloudfront_function": dataSourceAwsCloudFrontFunction(), + "aws_cloudfront_origin_request_policy": dataSourceAwsCloudFrontOriginRequestPolicy(), + "aws_cloudhsm_v2_cluster": dataSourceCloudHsmV2Cluster(), + "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), + "aws_cloudwatch_event_connection": dataSourceAwsCloudwatchEventConnection(), + "aws_cloudwatch_event_source": dataSourceAwsCloudWatchEventSource(), + "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), + "aws_codeartifact_authorization_token": dataSourceAwsCodeArtifactAuthorizationToken(), + "aws_codeartifact_repository_endpoint": dataSourceAwsCodeArtifactRepositoryEndpoint(), + "aws_cognito_user_pools": dataSourceAwsCognitoUserPools(), + "aws_codecommit_repository": dataSourceAwsCodeCommitRepository(), + "aws_codestarconnections_connection": dataSourceAwsCodeStarConnectionsConnection(), + "aws_cur_report_definition": dataSourceAwsCurReportDefinition(), + "aws_default_tags": dataSourceAwsDefaultTags(), + "aws_db_cluster_snapshot": dataSourceAwsDbClusterSnapshot(), + "aws_db_event_categories": dataSourceAwsDbEventCategories(), + 
"aws_db_instance": dataSourceAwsDbInstance(), + "aws_db_snapshot": dataSourceAwsDbSnapshot(), + "aws_db_subnet_group": dataSourceAwsDbSubnetGroup(), + "aws_directory_service_directory": dataSourceAwsDirectoryServiceDirectory(), + "aws_docdb_engine_version": dataSourceAwsDocdbEngineVersion(), + "aws_docdb_orderable_db_instance": dataSourceAwsDocdbOrderableDbInstance(), + "aws_dx_gateway": dataSourceAwsDxGateway(), + "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), + "aws_ebs_default_kms_key": dataSourceAwsEbsDefaultKmsKey(), + "aws_ebs_encryption_by_default": dataSourceAwsEbsEncryptionByDefault(), + "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), + "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), + "aws_ebs_volume": dataSourceAwsEbsVolume(), + "aws_ebs_volumes": dataSourceAwsEbsVolumes(), + "aws_ec2_coip_pool": dataSourceAwsEc2CoipPool(), + "aws_ec2_coip_pools": dataSourceAwsEc2CoipPools(), + "aws_ec2_instance_type": dataSourceAwsEc2InstanceType(), + "aws_ec2_instance_type_offering": dataSourceAwsEc2InstanceTypeOffering(), + "aws_ec2_instance_type_offerings": dataSourceAwsEc2InstanceTypeOfferings(), + "aws_ec2_local_gateway": dataSourceAwsEc2LocalGateway(), + "aws_ec2_local_gateways": dataSourceAwsEc2LocalGateways(), + "aws_ec2_local_gateway_route_table": dataSourceAwsEc2LocalGatewayRouteTable(), + "aws_ec2_local_gateway_route_tables": dataSourceAwsEc2LocalGatewayRouteTables(), + "aws_ec2_local_gateway_virtual_interface": dataSourceAwsEc2LocalGatewayVirtualInterface(), + "aws_ec2_local_gateway_virtual_interface_group": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroup(), + "aws_ec2_local_gateway_virtual_interface_groups": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroups(), + "aws_ec2_managed_prefix_list": dataSourceAwsEc2ManagedPrefixList(), + "aws_ec2_spot_price": dataSourceAwsEc2SpotPrice(), + "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), + "aws_ec2_transit_gateway_dx_gateway_attachment": dataSourceAwsEc2TransitGatewayDxGatewayAttachment(), + "aws_ec2_transit_gateway_peering_attachment": dataSourceAwsEc2TransitGatewayPeeringAttachment(), + "aws_ec2_transit_gateway_route_table": dataSourceAwsEc2TransitGatewayRouteTable(), + "aws_ec2_transit_gateway_route_tables": dataSourceAwsEc2TransitGatewayRouteTables(), + "aws_ec2_transit_gateway_vpc_attachment": dataSourceAwsEc2TransitGatewayVpcAttachment(), + "aws_ec2_transit_gateway_vpn_attachment": dataSourceAwsEc2TransitGatewayVpnAttachment(), + "aws_ecr_authorization_token": dataSourceAwsEcrAuthorizationToken(), + "aws_ecr_image": dataSourceAwsEcrImage(), + "aws_ecr_repository": dataSourceAwsEcrRepository(), + "aws_ecs_cluster": dataSourceAwsEcsCluster(), + "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), + "aws_ecs_service": dataSourceAwsEcsService(), + "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), + "aws_customer_gateway": dataSourceAwsCustomerGateway(), + "aws_efs_access_point": dataSourceAwsEfsAccessPoint(), + "aws_efs_access_points": dataSourceAwsEfsAccessPoints(), + "aws_efs_file_system": dataSourceAwsEfsFileSystem(), + "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), + "aws_eip": dataSourceAwsEip(), + "aws_eks_addon": dataSourceAwsEksAddon(), + "aws_eks_cluster": dataSourceAwsEksCluster(), + "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), + "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), + "aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), + "aws_elastic_beanstalk_solution_stack": 
dataSourceAwsElasticBeanstalkSolutionStack(), + "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), + "aws_elasticache_replication_group": dataSourceAwsElasticacheReplicationGroup(), + "aws_elasticache_user": dataSourceAwsElastiCacheUser(), + "aws_elasticsearch_domain": dataSourceAwsElasticSearchDomain(), + "aws_elb": dataSourceAwsElb(), + "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), + "aws_elb_service_account": dataSourceAwsElbServiceAccount(), + "aws_globalaccelerator_accelerator": dataSourceAwsGlobalAcceleratorAccelerator(), + "aws_glue_connection": dataSourceAwsGlueConnection(), + "aws_glue_data_catalog_encryption_settings": dataSourceAwsGlueDataCatalogEncryptionSettings(), + "aws_glue_script": dataSourceAwsGlueScript(), + "aws_guardduty_detector": dataSourceAwsGuarddutyDetector(), + "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), + "aws_iam_group": dataSourceAwsIAMGroup(), + "aws_iam_instance_profile": dataSourceAwsIAMInstanceProfile(), + "aws_iam_policy": dataSourceAwsIAMPolicy(), + "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), + "aws_iam_role": dataSourceAwsIAMRole(), + "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), + "aws_iam_session_context": dataSourceAwsIAMSessionContext(), + "aws_iam_user": dataSourceAwsIAMUser(), + "aws_identitystore_group": dataSourceAwsIdentityStoreGroup(), + "aws_identitystore_user": dataSourceAwsIdentityStoreUser(), + "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), + "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), + "aws_imagebuilder_image": dataSourceAwsImageBuilderImage(), + "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), + "aws_imagebuilder_image_recipe": dataSourceAwsImageBuilderImageRecipe(), + "aws_imagebuilder_infrastructure_configuration": datasourceAwsImageBuilderInfrastructureConfiguration(), + "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), + "aws_instance": dataSourceAwsInstance(), + "aws_instances": dataSourceAwsInstances(), + "aws_internet_gateway": dataSourceAwsInternetGateway(), + "aws_iot_endpoint": dataSourceAwsIotEndpoint(), + "aws_ip_ranges": dataSourceAwsIPRanges(), + "aws_kinesis_stream": dataSourceAwsKinesisStream(), + "aws_kinesis_stream_consumer": dataSourceAwsKinesisStreamConsumer(), + "aws_kms_alias": dataSourceAwsKmsAlias(), + "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), + "aws_kms_key": dataSourceAwsKmsKey(), + "aws_kms_public_key": dataSourceAwsKmsPublicKey(), + "aws_kms_secret": dataSourceAwsKmsSecret(), + "aws_kms_secrets": dataSourceAwsKmsSecrets(), + "aws_lakeformation_data_lake_settings": dataSourceAwsLakeFormationDataLakeSettings(), + "aws_lakeformation_permissions": dataSourceAwsLakeFormationPermissions(), + "aws_lakeformation_resource": dataSourceAwsLakeFormationResource(), + "aws_lambda_alias": dataSourceAwsLambdaAlias(), + "aws_lambda_code_signing_config": dataSourceAwsLambdaCodeSigningConfig(), + "aws_lambda_function": dataSourceAwsLambdaFunction(), + "aws_lambda_invocation": dataSourceAwsLambdaInvocation(), + "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), + "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), + "aws_launch_template": dataSourceAwsLaunchTemplate(), + "aws_lex_bot_alias": dataSourceAwsLexBotAlias(), + "aws_lex_bot": dataSourceAwsLexBot(), + "aws_lex_intent": dataSourceAwsLexIntent(), + "aws_lex_slot_type": dataSourceAwsLexSlotType(), + "aws_mq_broker": dataSourceAwsMqBroker(), + 
"aws_msk_cluster": dataSourceAwsMskCluster(), + "aws_msk_configuration": dataSourceAwsMskConfiguration(), + "aws_nat_gateway": dataSourceAwsNatGateway(), + "aws_neptune_orderable_db_instance": dataSourceAwsNeptuneOrderableDbInstance(), + "aws_neptune_engine_version": dataSourceAwsNeptuneEngineVersion(), + "aws_network_acls": dataSourceAwsNetworkAcls(), + "aws_network_interface": dataSourceAwsNetworkInterface(), + "aws_network_interfaces": dataSourceAwsNetworkInterfaces(), + "aws_organizations_delegated_administrators": dataSourceAwsOrganizationsDelegatedAdministrators(), + "aws_organizations_delegated_services": dataSourceAwsOrganizationsDelegatedServices(), + "aws_organizations_organization": dataSourceAwsOrganizationsOrganization(), + "aws_organizations_organizational_units": dataSourceAwsOrganizationsOrganizationalUnits(), + "aws_outposts_outpost": dataSourceAwsOutpostsOutpost(), + "aws_outposts_outpost_instance_type": dataSourceAwsOutpostsOutpostInstanceType(), + "aws_outposts_outpost_instance_types": dataSourceAwsOutpostsOutpostInstanceTypes(), + "aws_outposts_outposts": dataSourceAwsOutpostsOutposts(), + "aws_outposts_site": dataSourceAwsOutpostsSite(), + "aws_outposts_sites": dataSourceAwsOutpostsSites(), + "aws_partition": dataSourceAwsPartition(), + "aws_prefix_list": dataSourceAwsPrefixList(), + "aws_pricing_product": dataSourceAwsPricingProduct(), + "aws_qldb_ledger": dataSourceAwsQLDBLedger(), + "aws_ram_resource_share": dataSourceAwsRamResourceShare(), + "aws_rds_certificate": dataSourceAwsRdsCertificate(), + "aws_rds_cluster": dataSourceAwsRdsCluster(), + "aws_rds_engine_version": dataSourceAwsRdsEngineVersion(), + "aws_rds_orderable_db_instance": dataSourceAwsRdsOrderableDbInstance(), + "aws_redshift_cluster": dataSourceAwsRedshiftCluster(), + "aws_redshift_orderable_cluster": dataSourceAwsRedshiftOrderableCluster(), + "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), + "aws_region": dataSourceAwsRegion(), + "aws_regions": dataSourceAwsRegions(), + "aws_resourcegroupstaggingapi_resources": dataSourceAwsResourceGroupsTaggingAPIResources(), + "aws_route": dataSourceAwsRoute(), + "aws_route_table": dataSourceAwsRouteTable(), + "aws_route_tables": dataSourceAwsRouteTables(), + "aws_route53_delegation_set": dataSourceAwsDelegationSet(), + "aws_route53_resolver_endpoint": dataSourceAwsRoute53ResolverEndpoint(), + "aws_route53_resolver_rule": dataSourceAwsRoute53ResolverRule(), + "aws_route53_resolver_rules": dataSourceAwsRoute53ResolverRules(), + "aws_route53_zone": dataSourceAwsRoute53Zone(), + "aws_s3_bucket": dataSourceAwsS3Bucket(), + "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), + "aws_s3_bucket_objects": dataSourceAwsS3BucketObjects(), + "aws_sagemaker_prebuilt_ecr_image": dataSourceAwsSageMakerPrebuiltECRImage(), + "aws_secretsmanager_secret": dataSourceAwsSecretsManagerSecret(), + "aws_secretsmanager_secret_rotation": dataSourceAwsSecretsManagerSecretRotation(), + "aws_secretsmanager_secret_version": dataSourceAwsSecretsManagerSecretVersion(), + "aws_servicecatalog_constraint": dataSourceAwsServiceCatalogConstraint(), + "aws_servicecatalog_launch_paths": dataSourceAwsServiceCatalogLaunchPaths(), + "aws_servicecatalog_portfolio_constraints": dataSourceAwsServiceCatalogPortfolioConstraints(), + "aws_servicecatalog_portfolio": dataSourceAwsServiceCatalogPortfolio(), + "aws_servicecatalog_product": dataSourceAwsServiceCatalogProduct(), + "aws_servicequotas_service": dataSourceAwsServiceQuotasService(), + "aws_servicequotas_service_quota": 
dataSourceAwsServiceQuotasServiceQuota(), + "aws_service_discovery_dns_namespace": dataSourceServiceDiscoveryDnsNamespace(), + "aws_sfn_activity": dataSourceAwsSfnActivity(), + "aws_sfn_state_machine": dataSourceAwsSfnStateMachine(), + "aws_signer_signing_job": dataSourceAwsSignerSigningJob(), + "aws_signer_signing_profile": dataSourceAwsSignerSigningProfile(), + "aws_sns_topic": dataSourceAwsSnsTopic(), + "aws_sqs_queue": dataSourceAwsSqsQueue(), + "aws_ssm_document": dataSourceAwsSsmDocument(), + "aws_ssm_parameter": dataSourceAwsSsmParameter(), + "aws_ssm_patch_baseline": dataSourceAwsSsmPatchBaseline(), + "aws_ssoadmin_instances": dataSourceAwsSsoAdminInstances(), + "aws_ssoadmin_permission_set": dataSourceAwsSsoAdminPermissionSet(), + "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), + "aws_subnet": dataSourceAwsSubnet(), + "aws_subnet_ids": dataSourceAwsSubnetIDs(), + "aws_transfer_server": dataSourceAwsTransferServer(), + "aws_vpcs": dataSourceAwsVpcs(), + "aws_security_group": dataSourceAwsSecurityGroup(), + "aws_security_groups": dataSourceAwsSecurityGroups(), + "aws_vpc": dataSourceAwsVpc(), + "aws_vpc_dhcp_options": dataSourceAwsVpcDhcpOptions(), + "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), + "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), + "aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), + "aws_vpc_peering_connections": dataSourceAwsVpcPeeringConnections(), + "aws_vpn_gateway": dataSourceAwsVpnGateway(), + "aws_waf_ipset": dataSourceAwsWafIpSet(), + "aws_waf_rule": dataSourceAwsWafRule(), + "aws_waf_rate_based_rule": dataSourceAwsWafRateBasedRule(), + "aws_waf_web_acl": dataSourceAwsWafWebAcl(), + "aws_wafregional_ipset": dataSourceAwsWafRegionalIpSet(), + "aws_wafregional_rule": dataSourceAwsWafRegionalRule(), + "aws_wafregional_rate_based_rule": dataSourceAwsWafRegionalRateBasedRule(), + "aws_wafregional_web_acl": dataSourceAwsWafRegionalWebAcl(), + "aws_wafv2_ip_set": dataSourceAwsWafv2IPSet(), + "aws_wafv2_regex_pattern_set": dataSourceAwsWafv2RegexPatternSet(), + "aws_wafv2_rule_group": dataSourceAwsWafv2RuleGroup(), + "aws_wafv2_web_acl": dataSourceAwsWafv2WebACL(), + "aws_workspaces_bundle": dataSourceAwsWorkspacesBundle(), + "aws_workspaces_directory": dataSourceAwsWorkspacesDirectory(), + "aws_workspaces_image": dataSourceAwsWorkspacesImage(), + "aws_workspaces_workspace": dataSourceAwsWorkspacesWorkspace(), + + // Adding the Aliases for the ALB -> LB Rename + "aws_lb": dataSourceAwsLb(), + "aws_alb": dataSourceAwsLb(), + "aws_lb_listener": dataSourceAwsLbListener(), + "aws_alb_listener": dataSourceAwsLbListener(), + "aws_lb_target_group": dataSourceAwsLbTargetGroup(), + "aws_alb_target_group": dataSourceAwsLbTargetGroup(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "aws_accessanalyzer_analyzer": resourceAwsAccessAnalyzerAnalyzer(), + "aws_acm_certificate": resourceAwsAcmCertificate(), + "aws_acm_certificate_validation": resourceAwsAcmCertificateValidation(), + "aws_acmpca_certificate_authority": resourceAwsAcmpcaCertificateAuthority(), + "aws_acmpca_certificate_authority_certificate": resourceAwsAcmpcaCertificateAuthorityCertificate(), + "aws_acmpca_certificate": resourceAwsAcmpcaCertificate(), + "aws_ami": resourceAwsAmi(), + "aws_ami_copy": resourceAwsAmiCopy(), + "aws_ami_from_instance": resourceAwsAmiFromInstance(), + "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), + "aws_amplify_app": resourceAwsAmplifyApp(), + "aws_amplify_backend_environment": 
resourceAwsAmplifyBackendEnvironment(), + "aws_amplify_branch": resourceAwsAmplifyBranch(), + "aws_amplify_domain_association": resourceAwsAmplifyDomainAssociation(), + "aws_amplify_webhook": resourceAwsAmplifyWebhook(), + "aws_api_gateway_account": resourceAwsApiGatewayAccount(), + "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), + "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), + "aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(), + "aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(), + "aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(), + "aws_api_gateway_documentation_part": resourceAwsApiGatewayDocumentationPart(), + "aws_api_gateway_documentation_version": resourceAwsApiGatewayDocumentationVersion(), + "aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(), + "aws_api_gateway_gateway_response": resourceAwsApiGatewayGatewayResponse(), + "aws_api_gateway_integration": resourceAwsApiGatewayIntegration(), + "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), + "aws_api_gateway_method": resourceAwsApiGatewayMethod(), + "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), + "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), + "aws_api_gateway_model": resourceAwsApiGatewayModel(), + "aws_api_gateway_request_validator": resourceAwsApiGatewayRequestValidator(), + "aws_api_gateway_resource": resourceAwsApiGatewayResource(), + "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), + "aws_api_gateway_rest_api_policy": resourceAwsApiGatewayRestApiPolicy(), + "aws_api_gateway_stage": resourceAwsApiGatewayStage(), + "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), + "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), + "aws_api_gateway_vpc_link": resourceAwsApiGatewayVpcLink(), + "aws_apigatewayv2_api": resourceAwsApiGatewayV2Api(), + "aws_apigatewayv2_api_mapping": resourceAwsApiGatewayV2ApiMapping(), + "aws_apigatewayv2_authorizer": resourceAwsApiGatewayV2Authorizer(), + "aws_apigatewayv2_deployment": resourceAwsApiGatewayV2Deployment(), + "aws_apigatewayv2_domain_name": resourceAwsApiGatewayV2DomainName(), + "aws_apigatewayv2_integration": resourceAwsApiGatewayV2Integration(), + "aws_apigatewayv2_integration_response": resourceAwsApiGatewayV2IntegrationResponse(), + "aws_apigatewayv2_model": resourceAwsApiGatewayV2Model(), + "aws_apigatewayv2_route": resourceAwsApiGatewayV2Route(), + "aws_apigatewayv2_route_response": resourceAwsApiGatewayV2RouteResponse(), + "aws_apigatewayv2_stage": resourceAwsApiGatewayV2Stage(), + "aws_apigatewayv2_vpc_link": resourceAwsApiGatewayV2VpcLink(), + "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), + "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), + "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), + "aws_appautoscaling_scheduled_action": resourceAwsAppautoscalingScheduledAction(), + "aws_appconfig_application": resourceAwsAppconfigApplication(), + "aws_appconfig_configuration_profile": resourceAwsAppconfigConfigurationProfile(), + "aws_appconfig_deployment": resourceAwsAppconfigDeployment(), + "aws_appconfig_deployment_strategy": resourceAwsAppconfigDeploymentStrategy(), + "aws_appconfig_environment": resourceAwsAppconfigEnvironment(), + "aws_appconfig_hosted_configuration_version": resourceAwsAppconfigHostedConfigurationVersion(), + "aws_appmesh_gateway_route": resourceAwsAppmeshGatewayRoute(), 
+ "aws_appmesh_mesh": resourceAwsAppmeshMesh(), + "aws_appmesh_route": resourceAwsAppmeshRoute(), + "aws_appmesh_virtual_gateway": resourceAwsAppmeshVirtualGateway(), + "aws_appmesh_virtual_node": resourceAwsAppmeshVirtualNode(), + "aws_appmesh_virtual_router": resourceAwsAppmeshVirtualRouter(), + "aws_appmesh_virtual_service": resourceAwsAppmeshVirtualService(), + "aws_apprunner_auto_scaling_configuration_version": resourceAwsAppRunnerAutoScalingConfigurationVersion(), + "aws_apprunner_connection": resourceAwsAppRunnerConnection(), + "aws_apprunner_custom_domain_association": resourceAwsAppRunnerCustomDomainAssociation(), + "aws_apprunner_service": resourceAwsAppRunnerService(), + "aws_appsync_api_key": resourceAwsAppsyncApiKey(), + "aws_appsync_datasource": resourceAwsAppsyncDatasource(), + "aws_appsync_function": resourceAwsAppsyncFunction(), + "aws_appsync_graphql_api": resourceAwsAppsyncGraphqlApi(), + "aws_appsync_resolver": resourceAwsAppsyncResolver(), + "aws_athena_database": resourceAwsAthenaDatabase(), + "aws_athena_named_query": resourceAwsAthenaNamedQuery(), + "aws_athena_workgroup": resourceAwsAthenaWorkgroup(), + "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), + "aws_autoscaling_group": resourceAwsAutoscalingGroup(), + "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), + "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), + "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), + "aws_autoscalingplans_scaling_plan": resourceAwsAutoScalingPlansScalingPlan(), + "aws_backup_global_settings": resourceAwsBackupGlobalSettings(), + "aws_backup_plan": resourceAwsBackupPlan(), + "aws_backup_region_settings": resourceAwsBackupRegionSettings(), + "aws_backup_selection": resourceAwsBackupSelection(), + "aws_backup_vault": resourceAwsBackupVault(), + "aws_backup_vault_notifications": resourceAwsBackupVaultNotifications(), + "aws_backup_vault_policy": resourceAwsBackupVaultPolicy(), + "aws_budgets_budget": resourceAwsBudgetsBudget(), + "aws_budgets_budget_action": resourceAwsBudgetsBudgetAction(), + "aws_cloud9_environment_ec2": resourceAwsCloud9EnvironmentEc2(), + "aws_cloudformation_stack": resourceAwsCloudFormationStack(), + "aws_cloudformation_stack_set": resourceAwsCloudFormationStackSet(), + "aws_cloudformation_stack_set_instance": resourceAwsCloudFormationStackSetInstance(), + "aws_cloudformation_type": resourceAwsCloudFormationType(), + "aws_cloudfront_cache_policy": resourceAwsCloudFrontCachePolicy(), + "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), + "aws_cloudfront_function": resourceAwsCloudFrontFunction(), + "aws_cloudfront_key_group": resourceAwsCloudFrontKeyGroup(), + "aws_cloudfront_monitoring_subscription": resourceAwsCloudFrontMonitoringSubscription(), + "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), + "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), + "aws_cloudfront_public_key": resourceAwsCloudFrontPublicKey(), + "aws_cloudfront_realtime_log_config": resourceAwsCloudFrontRealtimeLogConfig(), + "aws_cloudtrail": resourceAwsCloudTrail(), + "aws_cloudwatch_event_bus": resourceAwsCloudWatchEventBus(), + "aws_cloudwatch_event_bus_policy": resourceAwsCloudWatchEventBusPolicy(), + "aws_cloudwatch_event_permission": resourceAwsCloudWatchEventPermission(), + "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), + "aws_cloudwatch_event_target": 
resourceAwsCloudWatchEventTarget(), + "aws_cloudwatch_event_archive": resourceAwsCloudWatchEventArchive(), + "aws_cloudwatch_event_connection": resourceAwsCloudWatchEventConnection(), + "aws_cloudwatch_event_api_destination": resourceAwsCloudWatchEventApiDestination(), + "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), + "aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(), + "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), + "aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(), + "aws_cloudwatch_log_resource_policy": resourceAwsCloudWatchLogResourcePolicy(), + "aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(), + "aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(), + "aws_config_aggregate_authorization": resourceAwsConfigAggregateAuthorization(), + "aws_config_config_rule": resourceAwsConfigConfigRule(), + "aws_config_configuration_aggregator": resourceAwsConfigConfigurationAggregator(), + "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), + "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), + "aws_config_conformance_pack": resourceAwsConfigConformancePack(), + "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), + "aws_config_organization_conformance_pack": resourceAwsConfigOrganizationConformancePack(), + "aws_config_organization_custom_rule": resourceAwsConfigOrganizationCustomRule(), + "aws_config_organization_managed_rule": resourceAwsConfigOrganizationManagedRule(), + "aws_config_remediation_configuration": resourceAwsConfigRemediationConfiguration(), + "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), + "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), + "aws_cognito_identity_provider": resourceAwsCognitoIdentityProvider(), + "aws_cognito_resource_server": resourceAwsCognitoResourceServer(), + "aws_cognito_user_group": resourceAwsCognitoUserGroup(), + "aws_cognito_user_pool": resourceAwsCognitoUserPool(), + "aws_cognito_user_pool_client": resourceAwsCognitoUserPoolClient(), + "aws_cognito_user_pool_domain": resourceAwsCognitoUserPoolDomain(), + "aws_cognito_user_pool_ui_customization": resourceAwsCognitoUserPoolUICustomization(), + "aws_cloudhsm_v2_cluster": resourceAwsCloudHsmV2Cluster(), + "aws_cloudhsm_v2_hsm": resourceAwsCloudHsmV2Hsm(), + "aws_cloudwatch_composite_alarm": resourceAwsCloudWatchCompositeAlarm(), + "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), + "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), + "aws_cloudwatch_metric_stream": resourceAwsCloudWatchMetricStream(), + "aws_cloudwatch_query_definition": resourceAwsCloudWatchQueryDefinition(), + "aws_codedeploy_app": resourceAwsCodeDeployApp(), + "aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(), + "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), + "aws_codecommit_repository": resourceAwsCodeCommitRepository(), + "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), + "aws_codeartifact_domain": resourceAwsCodeArtifactDomain(), + "aws_codeartifact_domain_permissions_policy": resourceAwsCodeArtifactDomainPermissionsPolicy(), + "aws_codeartifact_repository": resourceAwsCodeArtifactRepository(), + "aws_codeartifact_repository_permissions_policy": resourceAwsCodeArtifactRepositoryPermissionsPolicy(), + "aws_codebuild_project": resourceAwsCodeBuildProject(), + 
"aws_codebuild_report_group": resourceAwsCodeBuildReportGroup(), + "aws_codebuild_source_credential": resourceAwsCodeBuildSourceCredential(), + "aws_codebuild_webhook": resourceAwsCodeBuildWebhook(), + "aws_codepipeline": resourceAwsCodePipeline(), + "aws_codepipeline_webhook": resourceAwsCodePipelineWebhook(), + "aws_codestarconnections_connection": resourceAwsCodeStarConnectionsConnection(), + "aws_codestarconnections_host": resourceAwsCodeStarConnectionsHost(), + "aws_codestarnotifications_notification_rule": resourceAwsCodeStarNotificationsNotificationRule(), + "aws_cur_report_definition": resourceAwsCurReportDefinition(), + "aws_customer_gateway": resourceAwsCustomerGateway(), + "aws_datapipeline_pipeline": resourceAwsDataPipelinePipeline(), + "aws_datasync_agent": resourceAwsDataSyncAgent(), + "aws_datasync_location_efs": resourceAwsDataSyncLocationEfs(), + "aws_datasync_location_fsx_windows_file_system": resourceAwsDataSyncLocationFsxWindowsFileSystem(), + "aws_datasync_location_nfs": resourceAwsDataSyncLocationNfs(), + "aws_datasync_location_s3": resourceAwsDataSyncLocationS3(), + "aws_datasync_location_smb": resourceAwsDataSyncLocationSmb(), + "aws_datasync_task": resourceAwsDataSyncTask(), + "aws_dax_cluster": resourceAwsDaxCluster(), + "aws_dax_parameter_group": resourceAwsDaxParameterGroup(), + "aws_dax_subnet_group": resourceAwsDaxSubnetGroup(), + "aws_db_cluster_snapshot": resourceAwsDbClusterSnapshot(), + "aws_db_event_subscription": resourceAwsDbEventSubscription(), + "aws_db_instance": resourceAwsDbInstance(), + "aws_db_instance_role_association": resourceAwsDbInstanceRoleAssociation(), + "aws_db_option_group": resourceAwsDbOptionGroup(), + "aws_db_parameter_group": resourceAwsDbParameterGroup(), + "aws_db_proxy": resourceAwsDbProxy(), + "aws_db_proxy_default_target_group": resourceAwsDbProxyDefaultTargetGroup(), + "aws_db_proxy_endpoint": resourceAwsDbProxyEndpoint(), + "aws_db_proxy_target": resourceAwsDbProxyTarget(), + "aws_db_security_group": resourceAwsDbSecurityGroup(), + "aws_db_snapshot": resourceAwsDbSnapshot(), + "aws_db_subnet_group": resourceAwsDbSubnetGroup(), + "aws_devicefarm_project": resourceAwsDevicefarmProject(), + "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), + "aws_directory_service_conditional_forwarder": resourceAwsDirectoryServiceConditionalForwarder(), + "aws_directory_service_log_subscription": resourceAwsDirectoryServiceLogSubscription(), + "aws_dlm_lifecycle_policy": resourceAwsDlmLifecyclePolicy(), + "aws_dms_certificate": resourceAwsDmsCertificate(), + "aws_dms_endpoint": resourceAwsDmsEndpoint(), + "aws_dms_event_subscription": resourceAwsDmsEventSubscription(), + "aws_dms_replication_instance": resourceAwsDmsReplicationInstance(), + "aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(), + "aws_dms_replication_task": resourceAwsDmsReplicationTask(), + "aws_docdb_cluster": resourceAwsDocDBCluster(), + "aws_docdb_cluster_instance": resourceAwsDocDBClusterInstance(), + "aws_docdb_cluster_parameter_group": resourceAwsDocDBClusterParameterGroup(), + "aws_docdb_cluster_snapshot": resourceAwsDocDBClusterSnapshot(), + "aws_docdb_subnet_group": resourceAwsDocDBSubnetGroup(), + "aws_dx_bgp_peer": resourceAwsDxBgpPeer(), + "aws_dx_connection": resourceAwsDxConnection(), + "aws_dx_connection_association": resourceAwsDxConnectionAssociation(), + "aws_dx_gateway": resourceAwsDxGateway(), + "aws_dx_gateway_association": resourceAwsDxGatewayAssociation(), + "aws_dx_gateway_association_proposal": 
resourceAwsDxGatewayAssociationProposal(), + "aws_dx_hosted_private_virtual_interface": resourceAwsDxHostedPrivateVirtualInterface(), + "aws_dx_hosted_private_virtual_interface_accepter": resourceAwsDxHostedPrivateVirtualInterfaceAccepter(), + "aws_dx_hosted_public_virtual_interface": resourceAwsDxHostedPublicVirtualInterface(), + "aws_dx_hosted_public_virtual_interface_accepter": resourceAwsDxHostedPublicVirtualInterfaceAccepter(), + "aws_dx_hosted_transit_virtual_interface": resourceAwsDxHostedTransitVirtualInterface(), + "aws_dx_hosted_transit_virtual_interface_accepter": resourceAwsDxHostedTransitVirtualInterfaceAccepter(), + "aws_dx_lag": resourceAwsDxLag(), + "aws_dx_private_virtual_interface": resourceAwsDxPrivateVirtualInterface(), + "aws_dx_public_virtual_interface": resourceAwsDxPublicVirtualInterface(), + "aws_dx_transit_virtual_interface": resourceAwsDxTransitVirtualInterface(), + "aws_dynamodb_table": resourceAwsDynamoDbTable(), + "aws_dynamodb_table_item": resourceAwsDynamoDbTableItem(), + "aws_dynamodb_global_table": resourceAwsDynamoDbGlobalTable(), + "aws_dynamodb_kinesis_streaming_destination": resourceAwsDynamoDbKinesisStreamingDestination(), + "aws_ebs_default_kms_key": resourceAwsEbsDefaultKmsKey(), + "aws_ebs_encryption_by_default": resourceAwsEbsEncryptionByDefault(), + "aws_ebs_snapshot": resourceAwsEbsSnapshot(), + "aws_ebs_snapshot_copy": resourceAwsEbsSnapshotCopy(), + "aws_ebs_snapshot_import": resourceAwsEbsSnapshotImport(), + "aws_ebs_volume": resourceAwsEbsVolume(), + "aws_ec2_availability_zone_group": resourceAwsEc2AvailabilityZoneGroup(), + "aws_ec2_capacity_reservation": resourceAwsEc2CapacityReservation(), + "aws_ec2_carrier_gateway": resourceAwsEc2CarrierGateway(), + "aws_ec2_client_vpn_authorization_rule": resourceAwsEc2ClientVpnAuthorizationRule(), + "aws_ec2_client_vpn_endpoint": resourceAwsEc2ClientVpnEndpoint(), + "aws_ec2_client_vpn_network_association": resourceAwsEc2ClientVpnNetworkAssociation(), + "aws_ec2_client_vpn_route": resourceAwsEc2ClientVpnRoute(), + "aws_ec2_fleet": resourceAwsEc2Fleet(), + "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), + "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), + "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), + "aws_ec2_tag": resourceAwsEc2Tag(), + "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), + "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), + "aws_ec2_traffic_mirror_target": resourceAwsEc2TrafficMirrorTarget(), + "aws_ec2_traffic_mirror_session": resourceAwsEc2TrafficMirrorSession(), + "aws_ec2_transit_gateway": resourceAwsEc2TransitGateway(), + "aws_ec2_transit_gateway_peering_attachment": resourceAwsEc2TransitGatewayPeeringAttachment(), + "aws_ec2_transit_gateway_peering_attachment_accepter": resourceAwsEc2TransitGatewayPeeringAttachmentAccepter(), + "aws_ec2_transit_gateway_prefix_list_reference": resourceAwsEc2TransitGatewayPrefixListReference(), + "aws_ec2_transit_gateway_route": resourceAwsEc2TransitGatewayRoute(), + "aws_ec2_transit_gateway_route_table": resourceAwsEc2TransitGatewayRouteTable(), + "aws_ec2_transit_gateway_route_table_association": resourceAwsEc2TransitGatewayRouteTableAssociation(), + "aws_ec2_transit_gateway_route_table_propagation": resourceAwsEc2TransitGatewayRouteTablePropagation(), + "aws_ec2_transit_gateway_vpc_attachment": resourceAwsEc2TransitGatewayVpcAttachment(), + "aws_ec2_transit_gateway_vpc_attachment_accepter": 
resourceAwsEc2TransitGatewayVpcAttachmentAccepter(), + "aws_ecr_lifecycle_policy": resourceAwsEcrLifecyclePolicy(), + "aws_ecrpublic_repository": resourceAwsEcrPublicRepository(), + "aws_ecr_registry_policy": resourceAwsEcrRegistryPolicy(), + "aws_ecr_replication_configuration": resourceAwsEcrReplicationConfiguration(), + "aws_ecr_repository": resourceAwsEcrRepository(), + "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), + "aws_ecs_capacity_provider": resourceAwsEcsCapacityProvider(), + "aws_ecs_cluster": resourceAwsEcsCluster(), + "aws_ecs_service": resourceAwsEcsService(), + "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), + "aws_efs_access_point": resourceAwsEfsAccessPoint(), + "aws_efs_backup_policy": resourceAwsEfsBackupPolicy(), + "aws_efs_file_system": resourceAwsEfsFileSystem(), + "aws_efs_file_system_policy": resourceAwsEfsFileSystemPolicy(), + "aws_efs_mount_target": resourceAwsEfsMountTarget(), + "aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(), + "aws_eip": resourceAwsEip(), + "aws_eip_association": resourceAwsEipAssociation(), + "aws_eks_cluster": resourceAwsEksCluster(), + "aws_eks_addon": resourceAwsEksAddon(), + "aws_eks_fargate_profile": resourceAwsEksFargateProfile(), + "aws_eks_identity_provider_config": resourceAwsEksIdentityProviderConfig(), + "aws_eks_node_group": resourceAwsEksNodeGroup(), + "aws_elasticache_cluster": resourceAwsElasticacheCluster(), + "aws_elasticache_global_replication_group": resourceAwsElasticacheGlobalReplicationGroup(), + "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), + "aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(), + "aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), + "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), + "aws_elasticache_user": resourceAwsElasticacheUser(), + "aws_elasticache_user_group": resourceAwsElasticacheUserGroup(), + "aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(), + "aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(), + "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), + "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), + "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), + "aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(), + "aws_elasticsearch_domain_saml_options": resourceAwsElasticSearchDomainSAMLOptions(), + "aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), + "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), + "aws_elb": resourceAwsElb(), + "aws_elb_attachment": resourceAwsElbAttachment(), + "aws_emr_cluster": resourceAwsEMRCluster(), + "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), + "aws_emr_instance_fleet": resourceAwsEMRInstanceFleet(), + "aws_emr_managed_scaling_policy": resourceAwsEMRManagedScalingPolicy(), + "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), + "aws_flow_log": resourceAwsFlowLog(), + "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), + "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), + "aws_fms_admin_account": resourceAwsFmsAdminAccount(), + "aws_fms_policy": resourceAwsFmsPolicy(), + "aws_gamelift_alias": resourceAwsGameliftAlias(), + "aws_gamelift_build": resourceAwsGameliftBuild(), + "aws_gamelift_fleet": resourceAwsGameliftFleet(), + 
"aws_gamelift_game_session_queue": resourceAwsGameliftGameSessionQueue(), + "aws_glacier_vault": resourceAwsGlacierVault(), + "aws_glacier_vault_lock": resourceAwsGlacierVaultLock(), + "aws_globalaccelerator_accelerator": resourceAwsGlobalAcceleratorAccelerator(), + "aws_globalaccelerator_endpoint_group": resourceAwsGlobalAcceleratorEndpointGroup(), + "aws_globalaccelerator_listener": resourceAwsGlobalAcceleratorListener(), + "aws_glue_catalog_database": resourceAwsGlueCatalogDatabase(), + "aws_glue_catalog_table": resourceAwsGlueCatalogTable(), + "aws_glue_classifier": resourceAwsGlueClassifier(), + "aws_glue_connection": resourceAwsGlueConnection(), + "aws_glue_dev_endpoint": resourceAwsGlueDevEndpoint(), + "aws_glue_crawler": resourceAwsGlueCrawler(), + "aws_glue_data_catalog_encryption_settings": resourceAwsGlueDataCatalogEncryptionSettings(), + "aws_glue_job": resourceAwsGlueJob(), + "aws_glue_ml_transform": resourceAwsGlueMLTransform(), + "aws_glue_partition": resourceAwsGluePartition(), + "aws_glue_registry": resourceAwsGlueRegistry(), + "aws_glue_resource_policy": resourceAwsGlueResourcePolicy(), + "aws_glue_schema": resourceAwsGlueSchema(), + "aws_glue_security_configuration": resourceAwsGlueSecurityConfiguration(), + "aws_glue_trigger": resourceAwsGlueTrigger(), + "aws_glue_user_defined_function": resourceAwsGlueUserDefinedFunction(), + "aws_glue_workflow": resourceAwsGlueWorkflow(), + "aws_guardduty_detector": resourceAwsGuardDutyDetector(), + "aws_guardduty_filter": resourceAwsGuardDutyFilter(), + "aws_guardduty_invite_accepter": resourceAwsGuardDutyInviteAccepter(), + "aws_guardduty_ipset": resourceAwsGuardDutyIpset(), + "aws_guardduty_member": resourceAwsGuardDutyMember(), + "aws_guardduty_organization_admin_account": resourceAwsGuardDutyOrganizationAdminAccount(), + "aws_guardduty_organization_configuration": resourceAwsGuardDutyOrganizationConfiguration(), + "aws_guardduty_publishing_destination": resourceAwsGuardDutyPublishingDestination(), + "aws_guardduty_threatintelset": resourceAwsGuardDutyThreatintelset(), + "aws_iam_access_key": resourceAwsIamAccessKey(), + "aws_iam_account_alias": resourceAwsIamAccountAlias(), + "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), + "aws_iam_group_policy": resourceAwsIamGroupPolicy(), + "aws_iam_group": resourceAwsIamGroup(), + "aws_iam_group_membership": resourceAwsIamGroupMembership(), + "aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(), + "aws_iam_instance_profile": resourceAwsIamInstanceProfile(), + "aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(), + "aws_iam_policy": resourceAwsIamPolicy(), + "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), + "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), + "aws_iam_role_policy": resourceAwsIamRolePolicy(), + "aws_iam_role": resourceAwsIamRole(), + "aws_iam_saml_provider": resourceAwsIamSamlProvider(), + "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), + "aws_iam_service_linked_role": resourceAwsIamServiceLinkedRole(), + "aws_iam_user_group_membership": resourceAwsIamUserGroupMembership(), + "aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(), + "aws_iam_user_policy": resourceAwsIamUserPolicy(), + "aws_iam_user_ssh_key": resourceAwsIamUserSshKey(), + "aws_iam_user": resourceAwsIamUser(), + "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), + "aws_imagebuilder_component": resourceAwsImageBuilderComponent(), + 
"aws_imagebuilder_distribution_configuration": resourceAwsImageBuilderDistributionConfiguration(), + "aws_imagebuilder_image": resourceAwsImageBuilderImage(), + "aws_imagebuilder_image_pipeline": resourceAwsImageBuilderImagePipeline(), + "aws_imagebuilder_image_recipe": resourceAwsImageBuilderImageRecipe(), + "aws_imagebuilder_infrastructure_configuration": resourceAwsImageBuilderInfrastructureConfiguration(), + "aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(), + "aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(), + "aws_inspector_resource_group": resourceAWSInspectorResourceGroup(), + "aws_instance": resourceAwsInstance(), + "aws_internet_gateway": resourceAwsInternetGateway(), + "aws_iot_certificate": resourceAwsIotCertificate(), + "aws_iot_policy": resourceAwsIotPolicy(), + "aws_iot_policy_attachment": resourceAwsIotPolicyAttachment(), + "aws_iot_thing": resourceAwsIotThing(), + "aws_iot_thing_principal_attachment": resourceAwsIotThingPrincipalAttachment(), + "aws_iot_thing_type": resourceAwsIotThingType(), + "aws_iot_topic_rule": resourceAwsIotTopicRule(), + "aws_iot_role_alias": resourceAwsIotRoleAlias(), + "aws_key_pair": resourceAwsKeyPair(), + "aws_kinesis_analytics_application": resourceAwsKinesisAnalyticsApplication(), + "aws_kinesisanalyticsv2_application": resourceAwsKinesisAnalyticsV2Application(), + "aws_kinesisanalyticsv2_application_snapshot": resourceAwsKinesisAnalyticsV2ApplicationSnapshot(), + "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), + "aws_kinesis_stream": resourceAwsKinesisStream(), + "aws_kinesis_stream_consumer": resourceAwsKinesisStreamConsumer(), + "aws_kinesis_video_stream": resourceAwsKinesisVideoStream(), + "aws_kms_alias": resourceAwsKmsAlias(), + "aws_kms_external_key": resourceAwsKmsExternalKey(), + "aws_kms_grant": resourceAwsKmsGrant(), + "aws_kms_key": resourceAwsKmsKey(), + "aws_kms_ciphertext": resourceAwsKmsCiphertext(), + "aws_lakeformation_data_lake_settings": resourceAwsLakeFormationDataLakeSettings(), + "aws_lakeformation_permissions": resourceAwsLakeFormationPermissions(), + "aws_lakeformation_resource": resourceAwsLakeFormationResource(), + "aws_lambda_alias": resourceAwsLambdaAlias(), + "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), + "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), + "aws_lambda_function_event_invoke_config": resourceAwsLambdaFunctionEventInvokeConfig(), + "aws_lambda_function": resourceAwsLambdaFunction(), + "aws_lambda_layer_version": resourceAwsLambdaLayerVersion(), + "aws_lambda_permission": resourceAwsLambdaPermission(), + "aws_lambda_provisioned_concurrency_config": resourceAwsLambdaProvisionedConcurrencyConfig(), + "aws_launch_configuration": resourceAwsLaunchConfiguration(), + "aws_launch_template": resourceAwsLaunchTemplate(), + "aws_lex_bot": resourceAwsLexBot(), + "aws_lex_bot_alias": resourceAwsLexBotAlias(), + "aws_lex_intent": resourceAwsLexIntent(), + "aws_lex_slot_type": resourceAwsLexSlotType(), + "aws_licensemanager_association": resourceAwsLicenseManagerAssociation(), + "aws_licensemanager_license_configuration": resourceAwsLicenseManagerLicenseConfiguration(), + "aws_lightsail_domain": resourceAwsLightsailDomain(), + "aws_lightsail_instance": resourceAwsLightsailInstance(), + "aws_lightsail_instance_public_ports": resourceAwsLightsailInstancePublicPorts(), + "aws_lightsail_key_pair": resourceAwsLightsailKeyPair(), + "aws_lightsail_static_ip": 
resourceAwsLightsailStaticIp(), + "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(), + "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), + "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), + "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), + "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), + "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), + "aws_macie2_account": resourceAwsMacie2Account(), + "aws_macie2_classification_job": resourceAwsMacie2ClassificationJob(), + "aws_macie2_custom_data_identifier": resourceAwsMacie2CustomDataIdentifier(), + "aws_macie2_findings_filter": resourceAwsMacie2FindingsFilter(), + "aws_macie2_invitation_accepter": resourceAwsMacie2InvitationAccepter(), + "aws_macie2_member": resourceAwsMacie2Member(), + "aws_macie2_organization_admin_account": resourceAwsMacie2OrganizationAdminAccount(), + "aws_macie_member_account_association": resourceAwsMacieMemberAccountAssociation(), + "aws_macie_s3_bucket_association": resourceAwsMacieS3BucketAssociation(), + "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), + "aws_mq_broker": resourceAwsMqBroker(), + "aws_mq_configuration": resourceAwsMqConfiguration(), + "aws_media_convert_queue": resourceAwsMediaConvertQueue(), + "aws_media_package_channel": resourceAwsMediaPackageChannel(), + "aws_media_store_container": resourceAwsMediaStoreContainer(), + "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), + "aws_msk_cluster": resourceAwsMskCluster(), + "aws_msk_configuration": resourceAwsMskConfiguration(), + "aws_msk_scram_secret_association": resourceAwsMskScramSecretAssociation(), + "aws_mwaa_environment": resourceAwsMwaaEnvironment(), + "aws_nat_gateway": resourceAwsNatGateway(), + "aws_network_acl": resourceAwsNetworkAcl(), + "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), + "aws_neptune_cluster": resourceAwsNeptuneCluster(), + "aws_neptune_cluster_endpoint": resourceAwsNeptuneClusterEndpoint(), + "aws_neptune_cluster_instance": resourceAwsNeptuneClusterInstance(), + "aws_neptune_cluster_parameter_group": resourceAwsNeptuneClusterParameterGroup(), + "aws_neptune_cluster_snapshot": resourceAwsNeptuneClusterSnapshot(), + "aws_neptune_event_subscription": resourceAwsNeptuneEventSubscription(), + "aws_neptune_parameter_group": resourceAwsNeptuneParameterGroup(), + "aws_neptune_subnet_group": resourceAwsNeptuneSubnetGroup(), + "aws_network_acl_rule": resourceAwsNetworkAclRule(), + "aws_network_interface": resourceAwsNetworkInterface(), + "aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(), + "aws_networkfirewall_firewall": resourceAwsNetworkFirewallFirewall(), + "aws_networkfirewall_firewall_policy": resourceAwsNetworkFirewallFirewallPolicy(), + "aws_networkfirewall_logging_configuration": resourceAwsNetworkFirewallLoggingConfiguration(), + "aws_networkfirewall_resource_policy": resourceAwsNetworkFirewallResourcePolicy(), + "aws_networkfirewall_rule_group": resourceAwsNetworkFirewallRuleGroup(), + "aws_opsworks_application": resourceAwsOpsworksApplication(), + "aws_opsworks_stack": resourceAwsOpsworksStack(), + "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), + "aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(), + "aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(), + "aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(), + 
"aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(), + "aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(), + "aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(), + "aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(), + "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), + "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), + "aws_opsworks_instance": resourceAwsOpsworksInstance(), + "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), + "aws_opsworks_permission": resourceAwsOpsworksPermission(), + "aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(), + "aws_organizations_organization": resourceAwsOrganizationsOrganization(), + "aws_organizations_account": resourceAwsOrganizationsAccount(), + "aws_organizations_delegated_administrator": resourceAwsOrganizationsDelegatedAdministrator(), + "aws_organizations_policy": resourceAwsOrganizationsPolicy(), + "aws_organizations_policy_attachment": resourceAwsOrganizationsPolicyAttachment(), + "aws_organizations_organizational_unit": resourceAwsOrganizationsOrganizationalUnit(), + "aws_placement_group": resourceAwsPlacementGroup(), + "aws_prometheus_workspace": resourceAwsPrometheusWorkspace(), + "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), + "aws_qldb_ledger": resourceAwsQLDBLedger(), + "aws_quicksight_group": resourceAwsQuickSightGroup(), + "aws_quicksight_user": resourceAwsQuickSightUser(), + "aws_ram_principal_association": resourceAwsRamPrincipalAssociation(), + "aws_ram_resource_association": resourceAwsRamResourceAssociation(), + "aws_ram_resource_share": resourceAwsRamResourceShare(), + "aws_ram_resource_share_accepter": resourceAwsRamResourceShareAccepter(), + "aws_rds_cluster": resourceAwsRDSCluster(), + "aws_rds_cluster_endpoint": resourceAwsRDSClusterEndpoint(), + "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), + "aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(), + "aws_rds_cluster_role_association": resourceAwsRDSClusterRoleAssociation(), + "aws_rds_global_cluster": resourceAwsRDSGlobalCluster(), + "aws_redshift_cluster": resourceAwsRedshiftCluster(), + "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(), + "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), + "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), + "aws_redshift_snapshot_copy_grant": resourceAwsRedshiftSnapshotCopyGrant(), + "aws_redshift_snapshot_schedule": resourceAwsRedshiftSnapshotSchedule(), + "aws_redshift_snapshot_schedule_association": resourceAwsRedshiftSnapshotScheduleAssociation(), + "aws_redshift_event_subscription": resourceAwsRedshiftEventSubscription(), + "aws_resourcegroups_group": resourceAwsResourceGroupsGroup(), + "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), + "aws_route53_hosted_zone_dnssec": resourceAwsRoute53HostedZoneDnssec(), + "aws_route53_key_signing_key": resourceAwsRoute53KeySigningKey(), + "aws_route53_query_log": resourceAwsRoute53QueryLog(), + "aws_route53_record": resourceAwsRoute53Record(), + "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), + "aws_route53_vpc_association_authorization": resourceAwsRoute53VPCAssociationAuthorization(), + "aws_route53_zone": resourceAwsRoute53Zone(), + "aws_route53_health_check": resourceAwsRoute53HealthCheck(), + "aws_route53_resolver_dnssec_config": resourceAwsRoute53ResolverDnssecConfig(), + "aws_route53_resolver_endpoint": resourceAwsRoute53ResolverEndpoint(), + 
"aws_route53_resolver_firewall_config": resourceAwsRoute53ResolverFirewallConfig(), + "aws_route53_resolver_firewall_domain_list": resourceAwsRoute53ResolverFirewallDomainList(), + "aws_route53_resolver_firewall_rule": resourceAwsRoute53ResolverFirewallRule(), + "aws_route53_resolver_firewall_rule_group": resourceAwsRoute53ResolverFirewallRuleGroup(), + "aws_route53_resolver_firewall_rule_group_association": resourceAwsRoute53ResolverFirewallRuleGroupAssociation(), + "aws_route53_resolver_query_log_config": resourceAwsRoute53ResolverQueryLogConfig(), + "aws_route53_resolver_query_log_config_association": resourceAwsRoute53ResolverQueryLogConfigAssociation(), + "aws_route53_resolver_rule_association": resourceAwsRoute53ResolverRuleAssociation(), + "aws_route53_resolver_rule": resourceAwsRoute53ResolverRule(), + "aws_route": resourceAwsRoute(), + "aws_route_table": resourceAwsRouteTable(), + "aws_default_route_table": resourceAwsDefaultRouteTable(), + "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_sagemaker_app": resourceAwsSagemakerApp(), + "aws_sagemaker_app_image_config": resourceAwsSagemakerAppImageConfig(), + "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), + "aws_sagemaker_domain": resourceAwsSagemakerDomain(), + "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), + "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), + "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), + "aws_sagemaker_image": resourceAwsSagemakerImage(), + "aws_sagemaker_image_version": resourceAwsSagemakerImageVersion(), + "aws_sagemaker_model": resourceAwsSagemakerModel(), + "aws_sagemaker_model_package_group": resourceAwsSagemakerModelPackageGroup(), + "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), + "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), + "aws_sagemaker_user_profile": resourceAwsSagemakerUserProfile(), + "aws_sagemaker_workforce": resourceAwsSagemakerWorkforce(), + "aws_sagemaker_workteam": resourceAwsSagemakerWorkteam(), + "aws_schemas_discoverer": resourceAwsSchemasDiscoverer(), + "aws_schemas_registry": resourceAwsSchemasRegistry(), + "aws_schemas_schema": resourceAwsSchemasSchema(), + "aws_secretsmanager_secret": resourceAwsSecretsManagerSecret(), + "aws_secretsmanager_secret_policy": resourceAwsSecretsManagerSecretPolicy(), + "aws_secretsmanager_secret_version": resourceAwsSecretsManagerSecretVersion(), + "aws_secretsmanager_secret_rotation": resourceAwsSecretsManagerSecretRotation(), + "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), + "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), + "aws_ses_domain_identity_verification": resourceAwsSesDomainIdentityVerification(), + "aws_ses_domain_dkim": resourceAwsSesDomainDkim(), + "aws_ses_domain_mail_from": resourceAwsSesDomainMailFrom(), + "aws_ses_email_identity": resourceAwsSesEmailIdentity(), + "aws_ses_identity_policy": resourceAwsSesIdentityPolicy(), + "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), + "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), + "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), + "aws_ses_configuration_set": resourceAwsSesConfigurationSet(), + "aws_ses_event_destination": resourceAwsSesEventDestination(), + "aws_ses_identity_notification_topic": resourceAwsSesNotificationTopic(), + "aws_ses_template": resourceAwsSesTemplate(), + "aws_s3_access_point": 
resourceAwsS3AccessPoint(), + "aws_s3_account_public_access_block": resourceAwsS3AccountPublicAccessBlock(), + "aws_s3_bucket": resourceAwsS3Bucket(), + "aws_s3_bucket_analytics_configuration": resourceAwsS3BucketAnalyticsConfiguration(), + "aws_s3_bucket_policy": resourceAwsS3BucketPolicy(), + "aws_s3_bucket_public_access_block": resourceAwsS3BucketPublicAccessBlock(), + "aws_s3_bucket_object": resourceAwsS3BucketObject(), + "aws_s3_bucket_ownership_controls": resourceAwsS3BucketOwnershipControls(), + "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), + "aws_s3_bucket_metric": resourceAwsS3BucketMetric(), + "aws_s3_bucket_inventory": resourceAwsS3BucketInventory(), + "aws_s3_bucket_replication_configuration": resourceAwsS3BucketReplicationConfiguration(), + "aws_s3_object_copy": resourceAwsS3ObjectCopy(), + "aws_s3control_bucket": resourceAwsS3ControlBucket(), + "aws_s3control_bucket_policy": resourceAwsS3ControlBucketPolicy(), + "aws_s3control_bucket_lifecycle_configuration": resourceAwsS3ControlBucketLifecycleConfiguration(), + "aws_s3outposts_endpoint": resourceAwsS3OutpostsEndpoint(), + "aws_security_group": resourceAwsSecurityGroup(), + "aws_network_interface_sg_attachment": resourceAwsNetworkInterfaceSGAttachment(), + "aws_default_security_group": resourceAwsDefaultSecurityGroup(), + "aws_security_group_rule": resourceAwsSecurityGroupRule(), + "aws_securityhub_account": resourceAwsSecurityHubAccount(), + "aws_securityhub_action_target": resourceAwsSecurityHubActionTarget(), + "aws_securityhub_insight": resourceAwsSecurityHubInsight(), + "aws_securityhub_invite_accepter": resourceAwsSecurityHubInviteAccepter(), + "aws_securityhub_member": resourceAwsSecurityHubMember(), + "aws_securityhub_organization_admin_account": resourceAwsSecurityHubOrganizationAdminAccount(), + "aws_securityhub_organization_configuration": resourceAwsSecurityHubOrganizationConfiguration(), + "aws_securityhub_product_subscription": resourceAwsSecurityHubProductSubscription(), + "aws_securityhub_standards_control": resourceAwsSecurityHubStandardsControl(), + "aws_securityhub_standards_subscription": resourceAwsSecurityHubStandardsSubscription(), + "aws_servicecatalog_budget_resource_association": resourceAwsServiceCatalogBudgetResourceAssociation(), + "aws_servicecatalog_constraint": resourceAwsServiceCatalogConstraint(), + "aws_servicecatalog_organizations_access": resourceAwsServiceCatalogOrganizationsAccess(), + "aws_servicecatalog_portfolio": resourceAwsServiceCatalogPortfolio(), + "aws_servicecatalog_portfolio_share": resourceAwsServiceCatalogPortfolioShare(), + "aws_servicecatalog_product": resourceAwsServiceCatalogProduct(), + "aws_servicecatalog_provisioned_product": resourceAwsServiceCatalogProvisionedProduct(), + "aws_servicecatalog_service_action": resourceAwsServiceCatalogServiceAction(), + "aws_servicecatalog_tag_option": resourceAwsServiceCatalogTagOption(), + "aws_servicecatalog_tag_option_resource_association": resourceAwsServiceCatalogTagOptionResourceAssociation(), + "aws_servicecatalog_principal_portfolio_association": resourceAwsServiceCatalogPrincipalPortfolioAssociation(), + "aws_servicecatalog_product_portfolio_association": resourceAwsServiceCatalogProductPortfolioAssociation(), + "aws_servicecatalog_provisioning_artifact": resourceAwsServiceCatalogProvisioningArtifact(), + "aws_service_discovery_http_namespace": resourceAwsServiceDiscoveryHttpNamespace(), + "aws_service_discovery_private_dns_namespace": resourceAwsServiceDiscoveryPrivateDnsNamespace(), + 
"aws_service_discovery_public_dns_namespace": resourceAwsServiceDiscoveryPublicDnsNamespace(), + "aws_service_discovery_service": resourceAwsServiceDiscoveryService(), + "aws_servicequotas_service_quota": resourceAwsServiceQuotasServiceQuota(), + "aws_shield_protection": resourceAwsShieldProtection(), + "aws_signer_signing_job": resourceAwsSignerSigningJob(), + "aws_signer_signing_profile": resourceAwsSignerSigningProfile(), + "aws_signer_signing_profile_permission": resourceAwsSignerSigningProfilePermission(), + "aws_simpledb_domain": resourceAwsSimpleDBDomain(), + "aws_ssm_activation": resourceAwsSsmActivation(), + "aws_ssm_association": resourceAwsSsmAssociation(), + "aws_ssm_document": resourceAwsSsmDocument(), + "aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(), + "aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(), + "aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(), + "aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(), + "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), + "aws_ssm_parameter": resourceAwsSsmParameter(), + "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), + "aws_ssoadmin_account_assignment": resourceAwsSsoAdminAccountAssignment(), + "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), + "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), + "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), + "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), + "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), + "aws_storagegateway_file_system_association": resourceAwsStorageGatewayFileSystemAssociation(), + "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), + "aws_storagegateway_nfs_file_share": resourceAwsStorageGatewayNfsFileShare(), + "aws_storagegateway_smb_file_share": resourceAwsStorageGatewaySmbFileShare(), + "aws_storagegateway_stored_iscsi_volume": resourceAwsStorageGatewayStoredIscsiVolume(), + "aws_storagegateway_tape_pool": resourceAwsStorageGatewayTapePool(), + "aws_storagegateway_upload_buffer": resourceAwsStorageGatewayUploadBuffer(), + "aws_storagegateway_working_storage": resourceAwsStorageGatewayWorkingStorage(), + "aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(), + "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), + "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), + "aws_sqs_queue": resourceAwsSqsQueue(), + "aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(), + "aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(), + "aws_sns_platform_application": resourceAwsSnsPlatformApplication(), + "aws_sns_sms_preferences": resourceAwsSnsSmsPreferences(), + "aws_sns_topic": resourceAwsSnsTopic(), + "aws_sns_topic_policy": resourceAwsSnsTopicPolicy(), + "aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(), + "aws_sfn_activity": resourceAwsSfnActivity(), + "aws_sfn_state_machine": resourceAwsSfnStateMachine(), + "aws_default_subnet": resourceAwsDefaultSubnet(), + "aws_subnet": resourceAwsSubnet(), + "aws_swf_domain": resourceAwsSwfDomain(), + "aws_synthetics_canary": resourceAwsSyntheticsCanary(), + "aws_timestreamwrite_database": resourceAwsTimestreamWriteDatabase(), + "aws_timestreamwrite_table": resourceAwsTimestreamWriteTable(), + "aws_transfer_server": resourceAwsTransferServer(), + "aws_transfer_ssh_key": resourceAwsTransferSshKey(), + 
"aws_transfer_user": resourceAwsTransferUser(), + "aws_volume_attachment": resourceAwsVolumeAttachment(), + "aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(), + "aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(), + "aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(), + "aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(), + "aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(), + "aws_vpc_peering_connection_options": resourceAwsVpcPeeringConnectionOptions(), + "aws_default_vpc": resourceAwsDefaultVpc(), + "aws_vpc": resourceAwsVpc(), + "aws_vpc_endpoint": resourceAwsVpcEndpoint(), + "aws_vpc_endpoint_connection_notification": resourceAwsVpcEndpointConnectionNotification(), + "aws_vpc_endpoint_route_table_association": resourceAwsVpcEndpointRouteTableAssociation(), + "aws_vpc_endpoint_subnet_association": resourceAwsVpcEndpointSubnetAssociation(), + "aws_vpc_endpoint_service": resourceAwsVpcEndpointService(), + "aws_vpc_endpoint_service_allowed_principal": resourceAwsVpcEndpointServiceAllowedPrincipal(), + "aws_vpc_ipv4_cidr_block_association": resourceAwsVpcIpv4CidrBlockAssociation(), + "aws_vpn_connection": resourceAwsVpnConnection(), + "aws_vpn_connection_route": resourceAwsVpnConnectionRoute(), + "aws_vpn_gateway": resourceAwsVpnGateway(), + "aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(), + "aws_vpn_gateway_route_propagation": resourceAwsVpnGatewayRoutePropagation(), + "aws_waf_byte_match_set": resourceAwsWafByteMatchSet(), + "aws_waf_ipset": resourceAwsWafIPSet(), + "aws_waf_rate_based_rule": resourceAwsWafRateBasedRule(), + "aws_waf_regex_match_set": resourceAwsWafRegexMatchSet(), + "aws_waf_regex_pattern_set": resourceAwsWafRegexPatternSet(), + "aws_waf_rule": resourceAwsWafRule(), + "aws_waf_rule_group": resourceAwsWafRuleGroup(), + "aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(), + "aws_waf_web_acl": resourceAwsWafWebAcl(), + "aws_waf_xss_match_set": resourceAwsWafXssMatchSet(), + "aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(), + "aws_waf_geo_match_set": resourceAwsWafGeoMatchSet(), + "aws_wafregional_byte_match_set": resourceAwsWafRegionalByteMatchSet(), + "aws_wafregional_geo_match_set": resourceAwsWafRegionalGeoMatchSet(), + "aws_wafregional_ipset": resourceAwsWafRegionalIPSet(), + "aws_wafregional_rate_based_rule": resourceAwsWafRegionalRateBasedRule(), + "aws_wafregional_regex_match_set": resourceAwsWafRegionalRegexMatchSet(), + "aws_wafregional_regex_pattern_set": resourceAwsWafRegionalRegexPatternSet(), + "aws_wafregional_rule": resourceAwsWafRegionalRule(), + "aws_wafregional_rule_group": resourceAwsWafRegionalRuleGroup(), + "aws_wafregional_size_constraint_set": resourceAwsWafRegionalSizeConstraintSet(), + "aws_wafregional_sql_injection_match_set": resourceAwsWafRegionalSqlInjectionMatchSet(), + "aws_wafregional_xss_match_set": resourceAwsWafRegionalXssMatchSet(), + "aws_wafregional_web_acl": resourceAwsWafRegionalWebAcl(), + "aws_wafregional_web_acl_association": resourceAwsWafRegionalWebAclAssociation(), + "aws_wafv2_ip_set": resourceAwsWafv2IPSet(), + "aws_wafv2_regex_pattern_set": resourceAwsWafv2RegexPatternSet(), + "aws_wafv2_rule_group": resourceAwsWafv2RuleGroup(), + "aws_wafv2_web_acl": resourceAwsWafv2WebACL(), + "aws_wafv2_web_acl_association": resourceAwsWafv2WebACLAssociation(), + "aws_wafv2_web_acl_logging_configuration": resourceAwsWafv2WebACLLoggingConfiguration(), + "aws_worklink_fleet": resourceAwsWorkLinkFleet(), + 
"aws_worklink_website_certificate_authority_association": resourceAwsWorkLinkWebsiteCertificateAuthorityAssociation(), + "aws_workspaces_directory": resourceAwsWorkspacesDirectory(), + "aws_workspaces_workspace": resourceAwsWorkspacesWorkspace(), + "aws_batch_compute_environment": resourceAwsBatchComputeEnvironment(), + "aws_batch_job_definition": resourceAwsBatchJobDefinition(), + "aws_batch_job_queue": resourceAwsBatchJobQueue(), + "aws_pinpoint_app": resourceAwsPinpointApp(), + "aws_pinpoint_adm_channel": resourceAwsPinpointADMChannel(), + "aws_pinpoint_apns_channel": resourceAwsPinpointAPNSChannel(), + "aws_pinpoint_apns_sandbox_channel": resourceAwsPinpointAPNSSandboxChannel(), + "aws_pinpoint_apns_voip_channel": resourceAwsPinpointAPNSVoipChannel(), + "aws_pinpoint_apns_voip_sandbox_channel": resourceAwsPinpointAPNSVoipSandboxChannel(), + "aws_pinpoint_baidu_channel": resourceAwsPinpointBaiduChannel(), + "aws_pinpoint_email_channel": resourceAwsPinpointEmailChannel(), + "aws_pinpoint_event_stream": resourceAwsPinpointEventStream(), + "aws_pinpoint_gcm_channel": resourceAwsPinpointGCMChannel(), + "aws_pinpoint_sms_channel": resourceAwsPinpointSMSChannel(), + "aws_xray_encryption_config": resourceAwsXrayEncryptionConfig(), + "aws_xray_group": resourceAwsXrayGroup(), + "aws_xray_sampling_rule": resourceAwsXraySamplingRule(), + "aws_workspaces_ip_group": resourceAwsWorkspacesIpGroup(), + + // ALBs are actually LBs because they can be type `network` or `application` + // To avoid regressions, we will add a new resource for each and they both point + // back to the old ALB version. IF the Terraform supported aliases for resources + // this would be a whole lot simpler + "aws_alb": resourceAwsLb(), + "aws_lb": resourceAwsLb(), + "aws_alb_listener": resourceAwsLbListener(), + "aws_lb_listener": resourceAwsLbListener(), + "aws_alb_listener_certificate": resourceAwsLbListenerCertificate(), + "aws_lb_listener_certificate": resourceAwsLbListenerCertificate(), + "aws_alb_listener_rule": resourceAwsLbbListenerRule(), + "aws_lb_listener_rule": resourceAwsLbbListenerRule(), + "aws_alb_target_group": resourceAwsLbTargetGroup(), + "aws_lb_target_group": resourceAwsLbTargetGroup(), + "aws_alb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), + "aws_lb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), + }, + } + + // Avoid Go formatting churn and Git conflicts + // You probably should not do this + provider.DataSourcesMap["aws_serverlessapplicationrepository_application"] = dataSourceAwsServerlessApplicationRepositoryApplication() + provider.ResourcesMap["aws_serverlessapplicationrepository_cloudformation_stack"] = resourceAwsServerlessApplicationRepositoryCloudFormationStack() + + provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { + terraformVersion := provider.TerraformVersion + if terraformVersion == "" { + // Terraform 0.12 introduced this field to the protocol + // We can therefore assume that if it's missing it's 0.10 or 0.11 + terraformVersion = "0.11+compatible" + } + return providerConfigure(d, terraformVersion) + } + + return provider +} + +var descriptions map[string]string +var endpointServiceNames []string + +func init() { + descriptions = map[string]string{ + "region": "The region where AWS operations will take place. Examples\n" + + "are us-east-1, us-west-2, etc.", // lintignore:AWSAT003 + + "access_key": "The access key for API operations. 
You can retrieve this\n" + + "from the 'Security & Credentials' section of the AWS console.", + + "secret_key": "The secret key for API operations. You can retrieve this\n" + + "from the 'Security & Credentials' section of the AWS console.", + + "profile": "The profile for API operations. If not set, the default profile\n" + + "created with `aws configure` will be used.", + + "shared_credentials_file": "The path to the shared credentials file. If not set\n" + + "this defaults to ~/.aws/credentials.", + + "token": "session token. A session token is only required if you are\n" + + "using temporary security credentials.", + + "max_retries": "The maximum number of times an AWS API request is\n" + + "being executed. If the API request still fails, an error is\n" + + "thrown.", + + "endpoint": "Use this to override the default service endpoint URL", + + "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + + "default value is `false`", + + "skip_credentials_validation": "Skip the credentials validation via STS API. " + + "Used for AWS API implementations that do not have STS available/implemented.", + + "skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " + + "Used by users that don't have ec2:DescribeAccountAttributes permissions.", + + "skip_region_validation": "Skip static validation of region name. " + + "Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).", + + "skip_requesting_account_id": "Skip requesting the account ID. " + + "Used for AWS API implementations that do not have IAM/STS API and/or metadata API.", + + "skip_medatadata_api_check": "Skip the AWS Metadata API check. " + + "Used for AWS API implementations that do not have a metadata api endpoint.", + + "s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" + + "i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" + + "use virtual hosted bucket addressing when possible\n" + + "(http://BUCKET.s3.amazonaws.com/KEY). 
Specific to the Amazon S3 service.", + } + + endpointServiceNames = []string{ + "accessanalyzer", + "acm", + "acmpca", + "amplify", + "apigateway", + "appconfig", + "applicationautoscaling", + "applicationinsights", + "appmesh", + "apprunner", + "appstream", + "appsync", + "athena", + "auditmanager", + "autoscaling", + "autoscalingplans", + "backup", + "batch", + "budgets", + "chime", + "cloud9", + "cloudformation", + "cloudfront", + "cloudhsm", + "cloudsearch", + "cloudtrail", + "cloudwatch", + "cloudwatchevents", + "cloudwatchlogs", + "codeartifact", + "codebuild", + "codecommit", + "codedeploy", + "codepipeline", + "codestarconnections", + "cognitoidentity", + "cognitoidp", + "configservice", + "connect", + "cur", + "dataexchange", + "datapipeline", + "datasync", + "dax", + "detective", + "devicefarm", + "directconnect", + "dlm", + "dms", + "docdb", + "ds", + "dynamodb", + "ec2", + "ecr", + "ecrpublic", + "ecs", + "efs", + "eks", + "elasticache", + "elasticbeanstalk", + "elastictranscoder", + "elb", + "emr", + "emrcontainers", + "es", + "firehose", + "fms", + "forecast", + "fsx", + "gamelift", + "glacier", + "globalaccelerator", + "glue", + "greengrass", + "guardduty", + "iam", + "identitystore", + "imagebuilder", + "inspector", + "iot", + "iotanalytics", + "iotevents", + "kafka", + "kinesis", + "kinesisanalytics", + "kinesisanalyticsv2", + "kinesisvideo", + "kms", + "lakeformation", + "lambda", + "lexmodels", + "licensemanager", + "lightsail", + "location", + "macie", + "macie2", + "managedblockchain", + "marketplacecatalog", + "mediaconnect", + "mediaconvert", + "medialive", + "mediapackage", + "mediastore", + "mediastoredata", + "mq", + "mwaa", + "neptune", + "networkfirewall", + "networkmanager", + "opsworks", + "organizations", + "outposts", + "personalize", + "pinpoint", + "pricing", + "qldb", + "quicksight", + "ram", + "rds", + "redshift", + "resourcegroups", + "resourcegroupstaggingapi", + "route53", + "route53domains", + "route53resolver", + "s3", + "s3control", + "s3outposts", + "sagemaker", + "schemas", + "sdb", + "secretsmanager", + "securityhub", + "serverlessrepo", + "servicecatalog", + "servicediscovery", + "servicequotas", + "ses", + "shield", + "signer", + "sns", + "sqs", + "ssm", + "ssoadmin", + "stepfunctions", + "storagegateway", + "sts", + "swf", + "synthetics", + "timestreamwrite", + "transfer", + "waf", + "wafregional", + "wafv2", + "worklink", + "workmail", + "workspaces", + "xray", + } +} + +func providerConfigure(d *schema.ResourceData, terraformVersion string) (interface{}, error) { + config := Config{ + AccessKey: d.Get("access_key").(string), + SecretKey: d.Get("secret_key").(string), + Profile: d.Get("profile").(string), + Token: d.Get("token").(string), + Region: d.Get("region").(string), + CredsFilename: d.Get("shared_credentials_file").(string), + DefaultTagsConfig: expandProviderDefaultTags(d.Get("default_tags").([]interface{})), + Endpoints: make(map[string]string), + MaxRetries: d.Get("max_retries").(int), + IgnoreTagsConfig: expandProviderIgnoreTags(d.Get("ignore_tags").([]interface{})), + Insecure: d.Get("insecure").(bool), + SkipCredsValidation: d.Get("skip_credentials_validation").(bool), + SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool), + SkipRegionValidation: d.Get("skip_region_validation").(bool), + SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), + SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), + S3ForcePathStyle: d.Get("s3_force_path_style").(bool), + terraformVersion: terraformVersion, + } + + 
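+ // assume_role arrives as a single-item list block; the fields below
+ // (role ARN, session name, external ID, scoping policies, and session
+ // tags) are flattened onto the Config before the AWS client is built.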
if l, ok := d.Get("assume_role").([]interface{}); ok && len(l) > 0 && l[0] != nil { + m := l[0].(map[string]interface{}) + + if v, ok := m["duration_seconds"].(int); ok && v != 0 { + config.AssumeRoleDurationSeconds = v + } + + if v, ok := m["external_id"].(string); ok && v != "" { + config.AssumeRoleExternalID = v + } + + if v, ok := m["policy"].(string); ok && v != "" { + config.AssumeRolePolicy = v + } + + if policyARNSet, ok := m["policy_arns"].(*schema.Set); ok && policyARNSet.Len() > 0 { + for _, policyARNRaw := range policyARNSet.List() { + policyARN, ok := policyARNRaw.(string) + + if !ok { + continue + } + + config.AssumeRolePolicyARNs = append(config.AssumeRolePolicyARNs, policyARN) + } + } + + if v, ok := m["role_arn"].(string); ok && v != "" { + config.AssumeRoleARN = v + } + + if v, ok := m["session_name"].(string); ok && v != "" { + config.AssumeRoleSessionName = v + } + + if tagMapRaw, ok := m["tags"].(map[string]interface{}); ok && len(tagMapRaw) > 0 { + config.AssumeRoleTags = make(map[string]string) + + for k, vRaw := range tagMapRaw { + v, ok := vRaw.(string) + + if !ok { + continue + } + + config.AssumeRoleTags[k] = v + } + } + + if transitiveTagKeySet, ok := m["transitive_tag_keys"].(*schema.Set); ok && transitiveTagKeySet.Len() > 0 { + for _, transitiveTagKeyRaw := range transitiveTagKeySet.List() { + transitiveTagKey, ok := transitiveTagKeyRaw.(string) + + if !ok { + continue + } + + config.AssumeRoleTransitiveTagKeys = append(config.AssumeRoleTransitiveTagKeys, transitiveTagKey) + } + } + + log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q)", config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID) + } + + endpointsSet := d.Get("endpoints").(*schema.Set) + + for _, endpointsSetI := range endpointsSet.List() { + endpoints := endpointsSetI.(map[string]interface{}) + for _, endpointServiceName := range endpointServiceNames { + config.Endpoints[endpointServiceName] = endpoints[endpointServiceName].(string) + } + } + + if v, ok := d.GetOk("allowed_account_ids"); ok { + for _, accountIDRaw := range v.(*schema.Set).List() { + config.AllowedAccountIds = append(config.AllowedAccountIds, accountIDRaw.(string)) + } + } + + if v, ok := d.GetOk("forbidden_account_ids"); ok { + for _, accountIDRaw := range v.(*schema.Set).List() { + config.ForbiddenAccountIds = append(config.ForbiddenAccountIds, accountIDRaw.(string)) + } + } + + return config.Client() +} + +// This is a global MutexKV for use within this plugin. 
+var awsMutexKV = mutexkv.NewMutexKV() + +func assumeRoleSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: "Seconds to restrict the assume role session duration.", + }, + "external_id": { + Type: schema.TypeString, + Optional: true, + Description: "Unique identifier that might be required for assuming a role in another account.", + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", + ValidateFunc: validation.StringIsJSON, + }, + "policy_arns": { + Type: schema.TypeSet, + Optional: true, + Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Amazon Resource Name of an IAM Role to assume prior to making API calls.", + ValidateFunc: validateArn, + }, + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "Identifier for the assumed role session.", + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Assume role session tags.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "transitive_tag_keys": { + Type: schema.TypeSet, + Optional: true, + Description: "Assume role session tag keys to pass to any subsequent sessions.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + } +} + +func endpointsSchema() *schema.Schema { + endpointsAttributes := make(map[string]*schema.Schema) + + for _, endpointServiceName := range endpointServiceNames { + endpointsAttributes[endpointServiceName] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["endpoint"], + } + } + + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: endpointsAttributes, + }, + } +} + +func expandProviderDefaultTags(l []interface{}) *keyvaluetags.DefaultConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + defaultConfig := &keyvaluetags.DefaultConfig{} + m := l[0].(map[string]interface{}) + + if v, ok := m["tags"].(map[string]interface{}); ok { + defaultConfig.Tags = keyvaluetags.New(v) + } + return defaultConfig +} + +func expandProviderIgnoreTags(l []interface{}) *keyvaluetags.IgnoreConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + ignoreConfig := &keyvaluetags.IgnoreConfig{} + m := l[0].(map[string]interface{}) + + if v, ok := m["keys"].(*schema.Set); ok { + ignoreConfig.Keys = keyvaluetags.New(v.List()) + } + + if v, ok := m["key_prefixes"].(*schema.Set); ok { + ignoreConfig.KeyPrefixes = keyvaluetags.New(v.List()) + } + + return ignoreConfig +} + +// ReverseDns switches a DNS hostname to reverse DNS and vice-versa. 
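+// For example, "api.example.com" becomes "com.example.api" and back.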
+func ReverseDns(hostname string) string { + parts := strings.Split(hostname, ".") + + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + + return strings.Join(parts, ".") +} From 3bd0c6b7d0edcb9425d656ef6c95b95bffed05a1 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 30 Jul 2021 11:35:47 -0700 Subject: [PATCH 056/304] cleanup and remove unneeded logic --- ...aws_s3_bucket_replication_configuration.go | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go new file mode 100644 index 000000000000..400a2f12c5b8 --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketReplicationConfigurationCreate, + Read: resourceAwsS3BucketReplicationConfigurationRead, + Update: resourceAwsS3BucketReplicationConfigurationUpdate, + Delete: resourceAwsS3BucketReplicationConfigurationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"bucket_prefix"}, + ValidateFunc: validation.StringLenBetween(0, 63), + }, + "role": { + Type: schema.TypeString, + Required: true, + }, + "rules": { + Type: schema.TypeSet, + Required: true, + Set: rulesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), + }, + "destination": { + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAwsAccountId, + }, + "bucket": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + }, + "replica_kms_key_id": { + Type: schema.TypeString, + Optional: true, + }, + "access_control_translation": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + }, + }, + }, + }, + }, + }, + }, + "source_selection_criteria": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sse_kms_encrypted_objects": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, + 
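+ // prefix alone follows the replication configuration XML schema V1,
+ // while a filter block (together with priority and
+ // delete_marker_replication_status) follows schema V2.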
"prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "tags": tagsSchema(), + }, + }, + }, + "delete_marker_replication_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + }, + }, + }, + }, + + "tags": tagsSchema(), + "tags_all": tagsSchemaComputed(), + }, + + CustomizeDiff: SetTagsDiff, + } +} + +func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + +func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + input := &s3.HeadBucketInput{ + Bucket: aws.String(d.Get("bucket").(string)), + } + + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + _, err := s3conn.HeadBucket(input) + + if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + return resource.RetryableError(err) + } + + if d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + // Read the bucket replication configuration + replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(d.Get("bucket").(string)), + }) + }) + if err != nil && !isAWSErr(err, "ReplicationConfigurationNotFoundError", "") { + return fmt.Errorf("error getting S3 Bucket replication: %s", err) + } + replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) + if !ok || replication == nil { + return fmt.Errorf("error reading replication_configuration") + } + + return nil +} + +func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + + return nil +} From e4676496628685e1d1949e17400474e8c14db4e1 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:14:09 -0700 Subject: [PATCH 057/304] WIP setup update processes --- ...aws_s3_bucket_replication_configuration.go | 152 ++++++++++++++++-- 1 file changed, 142 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 400a2f12c5b8..74af667e458c 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -23,12 +26,11 @@ func 
resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"bucket_prefix"}, - ValidateFunc: validation.StringLenBetween(0, 63), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 63), }, "role": { Type: schema.TypeString, @@ -164,10 +166,6 @@ func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, m return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } -func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceAwsS3BucketReplicationConfigurationRead(d, meta) -} - func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn @@ -209,6 +207,140 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return nil } +func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + rc := &s3.ReplicationConfiguration{} + if val, ok := d.GetOk("role"); ok { + rc.Role = aws.String(val.(string)) + } + + rcRules := d.Get("rules").(*schema.Set).List() + rules := []*s3.ReplicationRule{} + for _, v := range rcRules { + rr := v.(map[string]interface{}) + rcRule := &s3.ReplicationRule{} + if status, ok := rr["status"]; ok && status != "" { + rcRule.Status = aws.String(status.(string)) + } else { + continue + } + + if rrid, ok := rr["id"]; ok && rrid != "" { + rcRule.ID = aws.String(rrid.(string)) + } + + ruleDestination := &s3.Destination{} + if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { + if dest[0] != nil { + bd := dest[0].(map[string]interface{}) + ruleDestination.Bucket = aws.String(bd["bucket"].(string)) + + if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { + ruleDestination.StorageClass = aws.String(storageClass.(string)) + } + + if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" { + ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)), + } + } + + if account, ok := bd["account_id"]; ok && account != "" { + ruleDestination.Account = aws.String(account.(string)) + } + + if aclTranslation, ok := bd["access_control_translation"].([]interface{}); ok && len(aclTranslation) > 0 { + aclTranslationValues := aclTranslation[0].(map[string]interface{}) + ruleAclTranslation := &s3.AccessControlTranslation{} + ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) + ruleDestination.AccessControlTranslation = ruleAclTranslation + } + } + } + rcRule.Destination = ruleDestination + + if ssc, ok := rr["source_selection_criteria"].([]interface{}); ok && len(ssc) > 0 { + if ssc[0] != nil { + sscValues := ssc[0].(map[string]interface{}) + ruleSsc := &s3.SourceSelectionCriteria{} + if sseKms, ok := sscValues["sse_kms_encrypted_objects"].([]interface{}); ok && len(sseKms) > 0 { + if sseKms[0] != nil { + sseKmsValues := sseKms[0].(map[string]interface{}) + sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + if sseKmsValues["enabled"].(bool) { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + } else { + sseKmsEncryptedObjects.Status = 
aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + } + ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + } + } + rcRule.SourceSelectionCriteria = ruleSsc + } + } + + if f, ok := rr["filter"].([]interface{}); ok && len(f) > 0 && f[0] != nil { + // XML schema V2. + rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) + rcRule.Filter = &s3.ReplicationRuleFilter{} + filter := f[0].(map[string]interface{}) + tags := keyvaluetags.New(filter["tags"]).IgnoreAws().S3Tags() + if len(tags) > 0 { + rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(filter["prefix"].(string)), + Tags: tags, + } + } else { + rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + } + + if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(dmr), + } + } else { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + } + } + } else { + // XML schema V1. + rcRule.Prefix = aws.String(rr["prefix"].(string)) + } + + rules = append(rules, rcRule) + } + + rc.Rules = rules + i := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, + } + log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := s3conn.PutBucketReplication(i) + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketReplication(i) + } + if err != nil { + return fmt.Errorf("Error putting S3 replication configuration: %s", err) + } + + return nil + return resourceAwsS3BucketReplicationConfigurationRead(d, meta) +} + func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { return nil From a445cff08284b72bf900f7479f9544cf4db29174 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 17 Aug 2021 14:15:06 -0700 Subject: [PATCH 058/304] WIP pull in tests from s3 bucket resource --- ...3_bucket_replication_configuration_test.go | 1461 +++++++++++++++++ 1 file changed, 1461 insertions(+) create mode 100644 aws/resource_aws_s3_bucket_replication_configuration_test.go diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go new file mode 100644 index 000000000000..ed348eb3c912 --- /dev/null +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -0,0 +1,1461 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "regexp" + "testing" +) + +func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: 
func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "GLACIER"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.StorageClassGlacier), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ 
+ "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "prefix3", + "filter.0.tags.%": "1", + "filter.0.tags.Key3": "Value3", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { + // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + 
}, + }) +} + +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + EncryptionConfiguration: &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), + }, + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ + Status: 
aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 +func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { + rInt := acctest.RandInt() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Account: aws.String("${data.aws_caller_identity.current.account_id}"), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + AccessControlTranslation: &s3.AccessControlTranslation{ + Owner: aws.String("Destination"), + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + +// StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 +func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { + rInt 
:= acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { + rInt := acctest.RandInt() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), + ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), + }, + }, + }) +} + +// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 +func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func 
TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + }, + }, + }, + Priority: aws.Int64(42), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String("foo"), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + }, + }, + }, + Priority: aws.Int64(41), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + 
testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + And: &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(""), + Tags: []*s3.Tag{ + { + Key: aws.String("ReplicateMe"), + Value: aws.String("Yes"), + }, + { + Key: aws.String("AnotherTag"), + Value: aws.String("OK"), + }, + { + Key: aws.String("Foo"), + Value: aws.String("Bar"), + }, + }, + }, + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + }, + }, + ), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationResourceName := "aws_s3_bucket.destination" + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckAWSS3BucketExists(destinationResourceName), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + +func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { + return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = "tf-iam-role-replication-%[1]d" + + assume_role_policy = < Date: Thu, 19 Aug 2021 11:04:42 -0700 Subject: [PATCH 059/304] WIP ensure create/read/update logic is operational --- ...aws_s3_bucket_replication_configuration.go | 127 +++++++++++++++++- ...3_bucket_replication_configuration_test.go | 33 ++++- 2 files changed, 153 
insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 74af667e458c..4f8cc098c2a3 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -2,8 +2,8 @@ package aws import ( "fmt" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "log" + "net/http" "time" "github.com/aws/aws-sdk-go/aws" @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ - Create: resourceAwsS3BucketReplicationConfigurationCreate, + Create: resourceAwsS3BucketReplicationConfigurationPut, Read: resourceAwsS3BucketReplicationConfigurationRead, Update: resourceAwsS3BucketReplicationConfigurationUpdate, Delete: resourceAwsS3BucketReplicationConfigurationDelete, @@ -162,7 +164,16 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { } } -func resourceAwsS3BucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { +func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta interface{}) error { + // Get the bucket + var bucket string + if v, ok := d.GetOk("bucket"); ok { + bucket = v.(string) + } else { + // fail, can't do anything without a bucket + } + d.SetId(bucket) + return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) } @@ -176,7 +187,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) - if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { return resource.RetryableError(err) } @@ -190,6 +201,29 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return nil }) + + if tfresource.TimedOut(err) { + _, err = s3conn.HeadBucket(input) + } + + if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + return nil + } + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) + } + + if _, ok := d.GetOk("bucket"); !ok { + d.Set("bucket", d.Id()) + } + // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ @@ -203,6 +237,90 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if !ok || replication == nil { return fmt.Errorf("error reading replication_configuration") } + r := replication.ReplicationConfiguration + // set role + if r.Role != nil && aws.StringValue(r.Role) != "" { + d.Set("role", aws.StringValue(r.Role)) + } + + // set rules, these need to be 
flattened + rules := make([]interface{}, 0, len(r.Rules)) + for _, v := range r.Rules { + t := make(map[string]interface{}) + if v.Destination != nil { + rd := make(map[string]interface{}) + if v.Destination.Bucket != nil { + rd["bucket"] = aws.StringValue(v.Destination.Bucket) + } + if v.Destination.StorageClass != nil { + rd["storage_class"] = aws.StringValue(v.Destination.StorageClass) + } + if v.Destination.EncryptionConfiguration != nil { + if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil { + rd["replica_kms_key_id"] = aws.StringValue(v.Destination.EncryptionConfiguration.ReplicaKmsKeyID) + } + } + if v.Destination.Account != nil { + rd["account_id"] = aws.StringValue(v.Destination.Account) + } + if v.Destination.AccessControlTranslation != nil { + rdt := map[string]interface{}{ + "owner": aws.StringValue(v.Destination.AccessControlTranslation.Owner), + } + rd["access_control_translation"] = []interface{}{rdt} + } + t["destination"] = []interface{}{rd} + } + + if v.ID != nil { + t["id"] = aws.StringValue(v.ID) + } + if v.Prefix != nil { + t["prefix"] = aws.StringValue(v.Prefix) + } + if v.Status != nil { + t["status"] = aws.StringValue(v.Status) + } + if vssc := v.SourceSelectionCriteria; vssc != nil { + tssc := make(map[string]interface{}) + if vssc.SseKmsEncryptedObjects != nil { + tSseKms := make(map[string]interface{}) + if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { + tSseKms["enabled"] = true + } else if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { + tSseKms["enabled"] = false + } + tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} + } + t["source_selection_criteria"] = []interface{}{tssc} + } + + if v.Priority != nil { + t["priority"] = int(aws.Int64Value(v.Priority)) + } + + if f := v.Filter; f != nil { + m := map[string]interface{}{} + if f.Prefix != nil { + m["prefix"] = aws.StringValue(f.Prefix) + } + if t := f.Tag; t != nil { + m["tags"] = keyvaluetags.S3KeyValueTags([]*s3.Tag{t}).IgnoreAws().Map() + } + if a := f.And; a != nil { + m["prefix"] = aws.StringValue(a.Prefix) + m["tags"] = keyvaluetags.S3KeyValueTags(a.Tags).IgnoreAws().Map() + } + t["filter"] = []interface{}{m} + + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { + t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + } + } + + rules = append(rules, t) + } + d.Set("rules", schema.NewSet(rulesHash, rules)) return nil } @@ -337,7 +455,6 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m return fmt.Errorf("Error putting S3 replication configuration: %s", err) } - return nil return resourceAwsS3BucketReplicationConfigurationRead(d, meta) } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index ed348eb3c912..c12697906f40 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,15 +2,44 @@ package aws import ( "fmt" + "regexp" + "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "regexp" - "testing" ) 
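The update path that the new Put function delegates to sits outside this hunk. As a rough sketch of what it has to do — the inverse of the Read flattening above — each entry of the "rules" set is expanded back into SDK types and sent in a single PutBucketReplication call. The helper names below (expandReplicationRules, putReplicationConfiguration) are illustrative assumptions rather than the provider's actual code, and the snippet assumes the resource file's existing imports (aws, s3, schema):

func expandReplicationRules(d *schema.ResourceData) []*s3.ReplicationRule {
	// Sketch only: walk the flattened "rules" set and rebuild SDK rule structs.
	var rules []*s3.ReplicationRule
	for _, v := range d.Get("rules").(*schema.Set).List() {
		m := v.(map[string]interface{})
		rule := &s3.ReplicationRule{
			Status: aws.String(m["status"].(string)),
		}
		if id, ok := m["id"].(string); ok && id != "" {
			rule.ID = aws.String(id)
		}
		// destination, filter, priority, and source_selection_criteria are
		// expanded the same way Read flattens them, just in reverse.
		rules = append(rules, rule)
	}
	return rules
}

func putReplicationConfiguration(d *schema.ResourceData, s3conn *s3.S3) error {
	// Sketch only: one PutBucketReplication call carries the role and rules.
	_, err := s3conn.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String(d.Get("bucket").(string)),
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role:  aws.String(d.Get("role").(string)),
			Rules: expandReplicationRules(d),
		},
	})
	return err
}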
+func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { + rInt := acctest.RandInt() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + ), + }, + }, + }) +} + func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() From 9b24010250a7f2afb6cbe1819871ed49f7110e60 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 19 Aug 2021 14:05:53 -0700 Subject: [PATCH 060/304] basic tests passing --- ...aws_s3_bucket_replication_configuration.go | 5 - ...3_bucket_replication_configuration_test.go | 92 +++++-------------- 2 files changed, 23 insertions(+), 74 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4f8cc098c2a3..5742d810d4bc 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -155,12 +155,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, - - "tags": tagsSchema(), - "tags_all": tagsSchemaComputed(), }, - - CustomizeDiff: SetTagsDiff, } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c12697906f40..f872ab698f1a 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -12,34 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func TestAccAWSS3BucketReplicationConfig_1basic(t *testing.T) { - rInt := acctest.RandInt() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - ), - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { rInt := acctest.RandInt() partition := testAccGetPartition() @@ -902,6 +874,10 @@ 
resource "aws_s3_bucket" "destination" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "source" { @@ -910,6 +886,10 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + + lifecycle { + ignore_changes = [replication_configuration] + } } `, randInt) } @@ -945,6 +925,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -954,6 +937,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1014,6 +1000,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket" "destination3" { @@ -1023,6 +1012,9 @@ resource "aws_s3_bucket" "destination3" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1094,6 +1086,9 @@ resource "aws_s3_bucket" "destination2" { versioning { enabled = true } + lifecycle { + ignore_changes = [replication_configuration] + } } resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1316,47 +1311,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { `) } -func testAccAWSS3BucketReplicationConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination string) string { - return composeConfig(testAccAWSS3BucketReplicationConfig_iamPolicy(rName), fmt.Sprintf(` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = %[1]q - acl = "private" - - versioning { - enabled = true - } - - replication_configuration { - role = aws_iam_role.test.arn - - rules { - id = "testid" - status = "Enabled" - - filter { - prefix = "testprefix" - } - - delete_marker_replication_status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } - } -} - -resource "aws_s3_bucket_replication_configuration" "destination" { - bucket = %[2]q - - versioning { - enabled = true - } -} -`, rName, rNameDestination)) -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { From 94cc6190ac8d4cb63b3085968e13c29efd48e379 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 15:27:31 -0700 Subject: [PATCH 061/304] Update expected resource names Rename resource names to reflect new position in configuration scope of the independent resource. 
Use literal strings instead of fmt.Sprintf in HCL concatenation --- ...3_bucket_replication_configuration_test.go | 169 +++++++----- 1 file changed, 66 insertions(+), 103 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index f872ab698f1a..4ac4e078867e 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -109,7 +108,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -130,9 +129,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -141,7 +139,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -150,7 +148,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -176,7 +174,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -197,9 +195,8 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", 
testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -208,7 +205,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -218,7 +215,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule3", "priority": "3", "status": "Disabled", @@ -247,7 +244,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -267,9 +264,8 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", "status": "Enabled", @@ -278,7 +274,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { "destination.#": "1", "destination.0.storage_class": "STANDARD", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule2", "priority": "2", "status": "Enabled", @@ -306,7 +302,7 @@ func 
TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -324,9 +320,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -358,9 +353,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -399,7 +393,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -417,9 +411,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -448,9 +441,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo 
Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -480,7 +472,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -512,35 +504,12 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -func TestAccAWSS3BucketReplicationConfig_expectVersioningValidationError(t *testing.T) { - rInt := acctest.RandInt() - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfigNoVersioning(rInt), - ExpectError: regexp.MustCompile(`versioning must be enabled to allow S3 bucket replication`), - }, - }, - }) -} - // Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { rInt := acctest.RandInt() alternateRegion := testAccGetAlternateRegion() region := testAccGetRegion() - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -578,7 +547,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -596,9 +565,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, 
"replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -626,9 +594,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -663,9 +630,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -701,9 +667,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -743,9 +708,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "replication_configuration.0.role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -790,7 +754,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_replication_configuration.replication" rName := acctest.RandomWithPrefix("tf-acc-test") destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") @@ -805,9 +769,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), - testAccCheckResourceAttrGlobalARN(resourceName, "replication_configuration.0.role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "1"), + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, @@ -943,7 +906,7 @@ resource "aws_s3_bucket" "destination3" { } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn rules { @@ -984,7 +947,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } - } + } `, randInt)) } @@ -1131,7 +1094,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_kms_key" "replica" { provider = "awsalternate" description = "TF Acceptance Test S3 repl KMS key" @@ -1160,11 +1123,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1187,11 +1150,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + 
fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1210,11 +1173,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` data "aws_caller_identity" "current" {} resource "aws_kms_key" "replica" { @@ -1250,11 +1213,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1269,11 +1232,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1288,11 +1251,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1308,11 +1271,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1331,11 +1294,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1356,11 +1319,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1383,11 +1346,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func 
testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1413,11 +1376,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id role = aws_iam_role.role.arn @@ -1440,5 +1403,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -`) +` } From cabf7d24f3394e4e96bbe42131a22311650aba4e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 23 Aug 2021 16:03:59 -0700 Subject: [PATCH 062/304] Guard against missing bucket or import id Ensure that the source bucket name is configured in the HCL Ensure that when importing the bucket name is passed in to the process as the import id value --- ...aws_s3_bucket_replication_configuration.go | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 5742d810d4bc..1eb8c0334882 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "log" "net/http" @@ -165,7 +166,8 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta if v, ok := d.GetOk("bucket"); ok { bucket = v.(string) } else { - // fail, can't do anything without a bucket + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") } d.SetId(bucket) @@ -173,12 +175,24 @@ func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta } func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - input := &s3.HeadBucketInput{ - Bucket: aws.String(d.Get("bucket").(string)), + if _, ok := d.GetOk("bucket"); !ok { + // during import operations, use the supplied ID for the bucket name + d.Set("bucket", d.Id()) + } + + var bucket *string + input := &s3.HeadBucketInput{} + if rsp, ok := d.GetOk("bucket"); !ok { + log.Printf("[ERROR] S3 Bucket name not set") + return errors.New("[ERROR] S3 Bucket name not set") + } else { + bucket = aws.String(rsp.(string)) + input.Bucket = bucket } + s3conn := meta.(*AWSClient).s3conn + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) @@ -215,14 +229,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) } - if _, ok := d.GetOk("bucket"); !ok { - d.Set("bucket", d.Id()) - } - // Read the bucket replication configuration replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(d.Get("bucket").(string)), + Bucket: bucket, }) }) if err != nil && !isAWSErr(err, 
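// ReplicationConfigurationNotFoundError is deliberately tolerated on read: a
// bucket that has no replication configuration yet is not an error for this
// resource, so the error branch guarded by this check is skipped for it.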
"ReplicationConfigurationNotFoundError", "") { From 0118c80dc9132ce8bbd96b875cc15cd944a83c2b Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 24 Aug 2021 16:11:06 -0700 Subject: [PATCH 063/304] Cleanout and relocate testing logic Relocate replication testing helper functions out of the s3 bucket tests and into the replication configuration testing file. Remove s3 bucket existance checks from replication testing per does not apply to the replication resource logic. --- ...3_bucket_replication_configuration_test.go | 290 ++++++++++-------- internal/service/s3/bucket_test.go | 64 ---- 2 files changed, 155 insertions(+), 199 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 4ac4e078867e..e9bf0f3c4497 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -2,6 +2,9 @@ package aws import ( "fmt" + "reflect" + "sort" + "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -9,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { @@ -106,8 +110,6 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -125,10 +127,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -172,8 +170,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -191,10 +187,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, 
testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -242,8 +234,6 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -261,9 +251,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", @@ -299,7 +286,6 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -319,7 +305,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -352,7 +337,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr { Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -390,7 +374,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr // Reference: 
https://github.com/hashicorp/terraform-provider-aws/issues/12480 func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -410,7 +393,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -440,7 +422,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), testAccCheckAWSS3BucketReplicationRules( @@ -470,8 +451,6 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -488,10 +467,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), + Check: resource.ComposeTestCheckFunc(), }, { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -504,47 +480,8 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }) } -// Prefix issue: https://github.com/hashicorp/terraform-provider-aws/issues/6340 -func TestAccAWSS3BucketReplicationConfig_withoutPrefix(t *testing.T) { - rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() - resourceName := "aws_s3_bucket_replication_configuration.replication" - - // record the initialized providers so that we can use them to check for the instances in each region - var providers []*schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) - }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), - Steps: []resource.TestStep{ - { - Config: 
testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithoutPrefix(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - }, - }) -} - func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { rInt := acctest.RandInt() - alternateRegion := testAccGetAlternateRegion() - region := testAccGetRegion() partition := testAccGetPartition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -564,10 +501,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -593,10 +528,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -629,10 +562,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -666,10 +597,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), 
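// testAccCheckAWSS3BucketReplicationRules, relocated into this file by this
// patch, reads the live configuration back via GetBucketReplication and
// deep-compares it against the expected rules (see its definition below).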
testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -707,10 +636,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -755,8 +682,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() rName := acctest.RandomWithPrefix("tf-acc-test") - destinationResourceName := "aws_s3_bucket.destination" rNameDestination := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -766,12 +693,10 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketExists(resourceName), testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketExists(destinationResourceName), testAccCheckAWSS3BucketReplicationRules( resourceName, []*s3.ReplicationRule{ @@ -795,7 +720,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { ), }, { - Config: testAccAWSS3BucketConfigSameRegionReplicationWithV2ConfigurationNoTags(rName, rNameDestination), + Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, @@ -806,8 +731,71 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + for _, rule := range rules { + if dest := rule.Destination; dest != nil { + if account := dest.Account; account != nil && strings.HasPrefix(aws.StringValue(dest.Account), "${") { + resourceReference := strings.Replace(aws.StringValue(dest.Account), "${", "", 1) + resourceReference = strings.Replace(resourceReference, "}", "", 1) + resourceReferenceParts := strings.Split(resourceReference, ".") + resourceAttribute := resourceReferenceParts[len(resourceReferenceParts)-1] + resourceName := strings.Join(resourceReferenceParts[:len(resourceReferenceParts)-1], ".") + value := s.RootModule().Resources[resourceName].Primary.Attributes[resourceAttribute] + dest.Account = aws.String(value) + } + if ec := dest.EncryptionConfiguration; ec != nil { + if ec.ReplicaKmsKeyID != nil { + key_arn := s.RootModule().Resources["aws_kms_key.replica"].Primary.Attributes["arn"] + ec.ReplicaKmsKeyID = 
aws.String(strings.Replace(*ec.ReplicaKmsKeyID, "${aws_kms_key.replica.arn}", key_arn, -1)) + } + } + } + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + + conn := testAccProvider.Meta().(*AWSClient).s3conn + out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(rs.Primary.ID), + }) + if err != nil { + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return fmt.Errorf("S3 bucket not found") + } + if rules == nil { + return nil + } + return fmt.Errorf("GetReplicationConfiguration error: %v", err) + } + + for _, rule := range out.ReplicationConfiguration.Rules { + // Sort filter tags by key. + if filter := rule.Filter; filter != nil { + if and := filter.And; and != nil { + if tags := and.Tags; tags != nil { + sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) + } + } + } + } + if !reflect.DeepEqual(out.ReplicationConfiguration.Rules, rules) { + return fmt.Errorf("bad replication rules, expected: %v, got %v", rules, out.ReplicationConfiguration.Rules) + } + + return nil + } +} + func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { - return testAccMultipleRegionProviderConfig(2) + fmt.Sprintf(` + return fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_iam_role" "role" { @@ -853,8 +841,7 @@ resource "aws_s3_bucket" "source" { lifecycle { ignore_changes = [replication_configuration] } -} -`, randInt) +} `, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { @@ -873,8 +860,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "%[1]s" } } -} -`, storageClass) +} `, storageClass) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { @@ -948,8 +934,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1034,8 +1019,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "ONEZONE_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1089,8 +1073,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD_IA" } } -} -`, randInt)) +} `, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1122,8 +1105,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1149,8 +1131,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1172,8 +1153,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1212,8 +1192,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { @@ -1231,8 +1210,7 @@ 
resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.destination.arn } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { @@ -1250,28 +1228,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` -} - -func testAccAWSS3BucketReplicationConfigNoVersioning(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { @@ -1293,8 +1250,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { @@ -1318,8 +1274,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { @@ -1345,8 +1300,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { @@ -1375,8 +1329,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } -} -` +} ` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { @@ -1402,6 +1355,73 @@ resource "aws_s3_bucket_replication_configuration" "replication" { storage_class = "STANDARD" } } +} ` } -` + +func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Mon, 30 Aug 2021 10:19:46 -0700 Subject: [PATCH 064/304] Support Existing Object Replication Adding schema for ExistingObjectReplication configuration Adding read logic to identify ExistingObjectReplication configurations added to replication rules Adding update logic to include ExistingObjectReplicaiton configuration in the PutBucketReplicaiton input --- ...aws_s3_bucket_replication_configuration.go | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1eb8c0334882..09a75e12c6fe 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -148,6 +148,21 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "existing_object_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + }, + }, + }, + }, "delete_marker_replication_status": { Type: schema.TypeString, Optional: true, @@ -248,7 +263,6 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met 
d.Set("role", aws.StringValue(r.Role)) } - // set rules, these need to be flattened rules := make([]interface{}, 0, len(r.Rules)) for _, v := range r.Rules { t := make(map[string]interface{}) @@ -277,6 +291,12 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met t["destination"] = []interface{}{rd} } + if v.ExistingObjectReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) + t["existing_object_replication"] = status + } + if v.ID != nil { t["id"] = aws.StringValue(v.ID) } @@ -354,6 +374,14 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.ID = aws.String(rrid.(string)) } + eor := rr["existing_object_replication"].([]interface{}) + if len(eor) > 0 { + s := eor[0].(map[string]interface{}) + rcRule.ExistingObjectReplication = &s3.ExistingObjectReplication{ + Status: aws.String(s["status"].(string)), + } + } + ruleDestination := &s3.Destination{} if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { if dest[0] != nil { From 60d212441b0a4acc173755d6ff9ddc5802cfe8fc Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 30 Aug 2021 10:23:04 -0700 Subject: [PATCH 065/304] Testing for ExistingObjectReplication In order for ExistingObjectReplication to work on s3 buckets, a request to AWS Technical Support needs to be made. Once they allow the configuration the test will operate as expected. --- ...3_bucket_replication_configuration_test.go | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e9bf0f3c4497..31bf253c6982 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -731,6 +731,68 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } +const isExistingObjectReplicationBlocked = true + +func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { + if isExistingObjectReplicationBlocked { + /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. + Once that request is approved, this can be unblocked for testing. 
*/ + return + } + resourceName := "aws_s3_bucket_replication_configuration.replication" + rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("testid"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + StorageClass: aws.String(s3.ObjectStorageClassStandard), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("testprefix"), + }, + Priority: aws.Int64(0), + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), + }, + ExistingObjectReplication: &s3.ExistingObjectReplication{ + Status: aws.String(s3.ExistingObjectReplicationStatusEnabled), + }, + }, + }, + ), + ), + }, + { + Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_destroy", "acl"}, + }, + }, + }) +} + func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] @@ -1425,3 +1487,76 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } `, rName, rNameDestination, rInt) } + +func testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Wed, 1 Sep 2021 08:39:13 -0700 Subject: [PATCH 066/304] Adding support for Replication Time Control new schema definition for "replication_time" along with update and read logic. 
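For orientation, a configuration exercising the new block is shaped like the acceptance test fixture below; the resource names (`aws_s3_bucket.source`, `aws_iam_role.role`, `aws_s3_bucket.destination`) come from the shared basic test config, and the 15-minute value is illustrative:

```terraform
resource "aws_s3_bucket_replication_configuration" "replication" {
  bucket = aws_s3_bucket.source.id
  role   = aws_iam_role.role.arn

  rules {
    id     = "foobar"
    status = "Enabled"

    filter {
      prefix = "foo"
    }

    destination {
      bucket = aws_s3_bucket.destination.arn

      # Both nested fields are required by the new schema; status only accepts "Enabled".
      replication_time {
        status = "Enabled"
        time {
          minutes = 15
        }
      }
    }
  }
}
```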
tracking upstream changes, adopt "waiter" module --- ...aws_s3_bucket_replication_configuration.go | 62 +++++++++++++++- ...3_bucket_replication_configuration_test.go | 72 +++++++++++++++++++ 2 files changed, 131 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 09a75e12c6fe..4fd9a8ce7c73 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -91,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "replication_time": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + }, + "time": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -157,7 +188,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), }, }, @@ -208,7 +239,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met s3conn := meta.(*AWSClient).s3conn - err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + err := resource.Retry(waiter.BucketCreatedTimeout, func() *resource.RetryError { _, err := s3conn.HeadBucket(input) if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { @@ -288,10 +319,20 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } rd["access_control_translation"] = []interface{}{rdt} } + if v.Destination.ReplicationTime != nil { + if v.Destination.ReplicationTime.Status != nil { + rd["replication_time"] = map[string]interface{}{ + "status": v.Destination.ReplicationTime.Status, + "time": map[string]interface{}{ + "minutes": v.Destination.ReplicationTime.Time.Minutes, + }, + } + } + } t["destination"] = []interface{}{rd} } - if v.ExistingObjectReplication.Status != nil { + if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) t["existing_object_replication"] = status @@ -408,6 +449,21 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) ruleDestination.AccessControlTranslation = ruleAclTranslation } + + rt, ok := bd["replication_time"].([]interface{}) + if ok && len(rt) > 0 { + s := rt[0].(map[string]interface{}) + if t, ok := s["time"].([]interface{}); ok && len(t) > 0 { + m := 
t[0].(map[string]interface{}) + ruleDestination.ReplicationTime = &s3.ReplicationTime{ + Status: aws.String(s["status"].(string)), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 31bf253c6982..7d44e94c1664 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -448,6 +448,54 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo }) } +func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigRTC(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + ReplicationTime: &s3.ReplicationTime{ + Status: aws.String(s3.ReplicationTimeStatusEnabled), + Time: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + Prefix: aws.String("foo"), + Status: aws.String(s3.ReplicationRuleStatusEnabled), + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -925,6 +973,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } `, storageClass) } +func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } + } + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), From 6d2fdcd6bb123e8c38b74938434cd743ed9b39be Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 14:29:24 -0700 Subject: [PATCH 067/304] Adding Metrics support Metrics are a requirement for the Replication Time Control functionality. Adding it here. 
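Sketched as HCL, a destination carrying both settings then looks like this (thresholds illustrative, mirroring the updated test configuration):

```terraform
destination {
  bucket = aws_s3_bucket.destination.arn

  replication_time {
    status = "Enabled"
    time {
      minutes = 15
    }
  }

  # S3 Replication Time Control requires metrics to be enabled alongside it.
  metrics {
    status = "Enabled"
    event_threshold {
      minutes = 15
    }
  }
}
```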
Restructure the configuration read logic for Replication Time to be more correct and inline with expected data structures Update tests to reflect changes --- ...aws_s3_bucket_replication_configuration.go | 66 +++++++++++++++++-- ...3_bucket_replication_configuration_test.go | 27 ++++++-- 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 4fd9a8ce7c73..1cf97c4b1d82 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -92,6 +92,36 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { }, }, }, + "metrics": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + }, + "event_threshold": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + }, + }, + }, "replication_time": { Type: schema.TypeList, Optional: true, @@ -320,13 +350,23 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met rd["access_control_translation"] = []interface{}{rdt} } if v.Destination.ReplicationTime != nil { + drt := make(map[string]interface{}) if v.Destination.ReplicationTime.Status != nil { - rd["replication_time"] = map[string]interface{}{ - "status": v.Destination.ReplicationTime.Status, - "time": map[string]interface{}{ - "minutes": v.Destination.ReplicationTime.Time.Minutes, - }, - } + drt["status"] = aws.StringValue(v.Destination.ReplicationTime.Status) + drtm := make(map[string]interface{}) + drtm["minutes"] = aws.Int64Value(v.Destination.ReplicationTime.Time.Minutes) + drt["time"] = []interface{}{drtm} + rd["replication_time"] = []interface{}{drt} + } + } + if v.Destination.Metrics != nil { + dm := make(map[string]interface{}) + if v.Destination.Metrics.Status != nil { + dm["status"] = aws.StringValue(v.Destination.Metrics.Status) + dmetm := make(map[string]interface{}) + dmetm["minutes"] = aws.Int64Value(v.Destination.Metrics.EventThreshold.Minutes) + dm["event_threshold"] = []interface{}{dmetm} + rd["metrics"] = []interface{}{dm} } } t["destination"] = []interface{}{rd} @@ -464,6 +504,20 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } } + rm, ok := bd["metrics"].([]interface{}) + if ok && len(rm) > 0 { + s := rm[0].(map[string]interface{}) + if et, ok := s["event_threshold"].([]interface{}); ok && len(et) > 0 { + m := et[0].(map[string]interface{}) + ruleDestination.Metrics = &s3.Metrics{ + Status: aws.String(s["status"].(string)), + EventThreshold: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(int64(m["minutes"].(int))), + }, + } + } + } + } } rcRule.Destination = ruleDestination diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 7d44e94c1664..70ff1e4cea98 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -475,7 +475,8 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { 
resourceName, []*s3.ReplicationRule{ { - ID: aws.String("foobar"), + ID: aws.String("foobar"), + Priority: aws.Int64(0), Destination: &s3.Destination{ Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), ReplicationTime: &s3.ReplicationTime{ @@ -484,8 +485,19 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { Minutes: aws.Int64(15), }, }, + Metrics: &s3.Metrics{ + Status: aws.String(s3.MetricsStatusEnabled), + EventThreshold: &s3.ReplicationTimeValue{ + Minutes: aws.Int64(15), + }, + }, + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), }, - Prefix: aws.String("foo"), Status: aws.String(s3.ReplicationRuleStatusEnabled), }, }, @@ -981,9 +993,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { rules { id = "foobar" - prefix = "foo" + filter { + prefix = "foo" + } status = "Enabled" - destination { bucket = aws_s3_bucket.destination.arn replication_time { @@ -992,6 +1005,12 @@ resource "aws_s3_bucket_replication_configuration" "replication" { minutes = 15 } } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } + } } } }` From 8d2fa83cc01c7dad0c8bb3fd7b67cd56c1ed4a62 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 1 Sep 2021 16:14:24 -0700 Subject: [PATCH 068/304] Adding Replica Modifications support, with tests Update the the source_selection_criteria configuration to include the replica_modificaions. Refactored sse_kms_encrypted_objects schema to map closer to the actual AWS SDK structure. --- ...aws_s3_bucket_replication_configuration.go | 44 ++++++--- ...3_bucket_replication_configuration_test.go | 99 +++++++++++++++---- 2 files changed, 109 insertions(+), 34 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 1cf97c4b1d82..8cfffba4044e 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -159,7 +159,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "sse_kms_encrypted_objects": { @@ -169,9 +169,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + }, + }, + }, + }, + "replica_modifications": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), }, }, }, @@ -391,11 +407,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met tssc := make(map[string]interface{}) if vssc.SseKmsEncryptedObjects != nil { tSseKms := make(map[string]interface{}) - if aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { - tSseKms["enabled"] = true - } else if 
aws.StringValue(vssc.SseKmsEncryptedObjects.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { - tSseKms["enabled"] = false - } + tSseKms["status"] = aws.StringValue(vssc.SseKmsEncryptedObjects.Status) tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} } t["source_selection_criteria"] = []interface{}{tssc} @@ -530,14 +542,18 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m if sseKms[0] != nil { sseKmsValues := sseKms[0].(map[string]interface{}) sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} - if sseKmsValues["enabled"].(bool) { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) - } else { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) - } + sseKmsEncryptedObjects.Status = aws.String(sseKmsValues["status"].(string)) ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects } } + if sscRm, ok := sscValues["replica_modifications"].([]interface{}); ok && len(sscRm) > 0 { + if sscRm[0] != nil { + replicaModValues := sscRm[0].(map[string]interface{}) + replicaModifications := &s3.ReplicaModifications{} + replicaModifications.Status = aws.String(replicaModValues["status"].(string)) + ruleSsc.ReplicaModifications = replicaModifications + } + } rcRule.SourceSelectionCriteria = ruleSsc } } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 70ff1e4cea98..acb0374ab3f6 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -508,6 +508,59 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }) } +func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), + testAccCheckAWSS3BucketReplicationRules( + resourceName, + []*s3.ReplicationRule{ + { + ID: aws.String("foobar"), + Priority: aws.Int64(0), + Destination: &s3.Destination{ + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), + }, + DeleteMarkerReplication: &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + }, + Filter: &s3.ReplicationRuleFilter{ + Prefix: aws.String("foo"), + }, + Status: aws.String(s3.ReplicationRuleStatusEnabled), + SourceSelectionCriteria: &s3.SourceSelectionCriteria{ + ReplicaModifications: &s3.ReplicaModifications{ + Status: aws.String(s3.ReplicaModificationsStatusEnabled), + }, + }, + }, + }, + ), + ), + }, + }, + }) +} + // StorageClass issue: 
https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { rInt := acctest.RandInt() @@ -1016,6 +1069,30 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } +func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { + return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` +resource "aws_s3_bucket_replication_configuration" "replication" { + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn + + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" + } + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}` +} + func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { return composeConfig( testAccAWSS3BucketReplicationConfigBasic(randInt), @@ -1254,7 +1331,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1341,7 +1418,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { source_selection_criteria { sse_kms_encrypted_objects { - enabled = true + status = "Enabled" } } } @@ -1366,24 +1443,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } ` } -func testAccAWSS3BucketReplicationConfigWithoutPrefix(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -} ` -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { From 7d246e558185dafed89ae6c3e5b4faaa9755518e Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 14:42:41 -0700 Subject: [PATCH 069/304] terrafmt --- ...3_bucket_replication_configuration_test.go | 570 +++++++++--------- 1 file changed, 285 insertions(+), 285 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index acb0374ab3f6..522c33899795 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1002,94 +1002,94 @@ resource "aws_s3_bucket" "destination" { } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket" "source" { - bucket = "tf-test-bucket-source-%[1]d" + bucket = "tf-test-bucket-source-%[1]d" versioning { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } -} `, randInt) +}`, randInt) } func testAccAWSS3BucketReplicationConfig(randInt int, storageClass string) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + 
prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "%[1]s" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "%[1]s" } -} `, storageClass) + } +}`, storageClass) } func testAccAWSS3BucketReplicationConfigRTC(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" + rules { + id = "foobar" + filter { + prefix = "foo" + } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + replication_time { + status = "Enabled" + time { + minutes = 15 + } } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - replication_time { - status = "Enabled" - time { - minutes = 15 - } - } - metrics { - status = "Enabled" - event_threshold { - minutes = 15 - } - } + metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } } } + } }` } func testAccAWSS3BucketReplicationConfigReplicaMods(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "foobar" - filter { - prefix = "foo" - } - source_selection_criteria { - replica_modifications { - status = "Enabled" - } - } - status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn + rules { + id = "foobar" + filter { + prefix = "foo" + } + source_selection_criteria { + replica_modifications { + status = "Enabled" } } + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + } + } }` } @@ -1105,7 +1105,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1117,54 +1117,54 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + bucket = aws_s3_bucket.source.id + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter {} + filter {} - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } - -} `, randInt)) + } + +}`, 
randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { @@ -1179,7 +1179,7 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } @@ -1191,65 +1191,65 @@ resource "aws_s3_bucket" "destination3" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } + } - rules { - id = "rule3" - priority = 3 - status = "Disabled" + rules { + id = "rule3" + priority = 3 + status = "Disabled" - filter { - prefix = "prefix3" + filter { + prefix = "prefix3" - tags = { - Key3 = "Value3" - } + tags = { + Key3 = "Value3" } + } - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { @@ -1264,46 +1264,46 @@ resource "aws_s3_bucket" "destination2" { enabled = true } lifecycle { - ignore_changes = [replication_configuration] + ignore_changes = [replication_configuration] } } resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "rule1" - priority = 1 - status = "Enabled" + rules { + id = "rule1" + priority = 1 + status = "Enabled" - filter { - prefix = "prefix1" - } + filter { + prefix = "prefix1" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } + } - rules { - id = "rule2" - priority = 2 - status = "Enabled" + rules { + id = "rule2" + priority = 2 + status = "Enabled" - filter { - tags = { - Key2 = "Value2" - } + filter { + tags = { + Key2 = "Value2" } + } - destination { - bucket = aws_s3_bucket.destination2.arn - storage_class = "STANDARD_IA" - } + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" } -} `, randInt)) + } +}`, randInt)) } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(randInt int) string { @@ -1316,26 +1316,26 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" 
+ rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(randInt int) string { @@ -1344,24 +1344,24 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(randInt int) string { @@ -1370,20 +1370,20 @@ data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(randInt int) string { @@ -1398,176 +1398,176 @@ resource "aws_kms_key" "replica" { resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - account_id = data.aws_caller_identity.current.account_id - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - replica_kms_key_id = aws_kms_key.replica.arn + destination { + account_id = data.aws_caller_identity.current.account_id + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + replica_kms_key_id = aws_kms_key.replica.arn - access_control_translation { - owner = "Destination" - } + access_control_translation { + owner = "Destination" } + } - source_selection_criteria { - sse_kms_encrypted_objects { - status = "Enabled" - } + source_selection_criteria { + sse_kms_encrypted_objects { + status = "Enabled" } } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithoutStorageClass(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role 
= aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - prefix = "foo" - status = "Enabled" + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - } + destination { + bucket = aws_s3_bucket.destination.arn } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - prefix = "foo" - } + filter { + prefix = "foo" + } - delete_marker_replication_status = "Enabled" + delete_marker_replication_status = "Enabled" - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 42 + priority = 42 - filter { - tags = { - ReplicateMe = "Yes" - } + filter { + tags = { + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - priority = 41 + priority = 41 - filter { - prefix = "foo" + filter { + prefix = "foo" - tags = { - AnotherTag = "OK" - ReplicateMe = "Yes" - } + tags = { + AnotherTag = "OK" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { bucket = 
aws_s3_bucket.source.id - role = aws_iam_role.role.arn + role = aws_iam_role.role.arn - rules { - id = "foobar" - status = "Enabled" + rules { + id = "foobar" + status = "Enabled" - filter { - tags = { - AnotherTag = "OK" - Foo = "Bar" - ReplicateMe = "Yes" - } + filter { + tags = { + AnotherTag = "OK" + Foo = "Bar" + ReplicateMe = "Yes" } + } - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" } -} ` + } +}` } func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { From c991dffd86044045146cf618425957279401764d Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 2 Sep 2021 15:46:23 -0700 Subject: [PATCH 070/304] terrafmt --- ...3_bucket_replication_configuration_test.go | 89 ++++++++++--------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 522c33899795..012e5f5bb750 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1573,7 +1573,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { return fmt.Sprintf(` resource "aws_iam_role" "test" { - name = %[1]q + name = "%[1]s" assume_role_policy = < Date: Fri, 3 Sep 2021 15:50:53 -0700 Subject: [PATCH 071/304] Initial documentation for new resource Adding documentation page for the new independent resource. Initialized with content copied over from the s3_bucket.html.markdown page. --- ...et_replication_configuration.html.markdown | 190 ++++++++++++++++++ 1 file changed, 190 insertions(+) create mode 100644 website/docs/r/s3_bucket_replication_configuration.html.markdown diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown new file mode 100644 index 000000000000..9333835f4d57 --- /dev/null +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -0,0 +1,190 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_bucket_replication_configuration" +description: |- + Provides a S3 bucket replication configuration resource. +--- + +# Resource: aws_s3_bucket_replication_configuration + +Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. + +## Example Usage + +### Using replication configuration + +```terraform +provider "aws" { + region = "eu-west-1" +} + +provider "aws" { + alias = "central" + region = "eu-central-1" +} + +resource "aws_iam_role" "replication" { + name = "tf-iam-role-replication-12345" + + assume_role_policy = < **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. +Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. 
To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `destination` - (Required) Specifies the destination for the rule (documented below). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. +* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. + +The `destination` object supports the following: + +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + `sse_kms_encrypted_objects` source selection criteria. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. + +The `source_selection_criteria` object supports the following: + +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. + +The `sse_kms_encrypted_objects` object supports the following: + +* `enabled` - (Required) Boolean which indicates if this criteria is enabled. + +The `filter` object supports the following: + +* `prefix` - (Optional) Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. +The rule applies only to objects having all the tags in its tagset. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +## Import + +S3 bucket replication configuration can be imported using the `bucket`, e.g. 
+ +``` +$ terraform import aws_s3_bucket_replication_configuration.replication bucket-name +``` From ed53680190b43aad124dc5c62ae78a594dc2f3f9 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 9 Sep 2021 14:26:09 -0700 Subject: [PATCH 072/304] adding new feature documentation --- website/docs/r/s3_bucket.html.markdown | 14 +++++++ ...et_replication_configuration.html.markdown | 40 ++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index bdce5f7f38c0..6d243a9dccf0 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -301,6 +301,10 @@ resource "aws_s3_bucket" "source" { } ``` +~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. + + + ### Enable Default Server Side Encryption ```terraform @@ -436,6 +440,16 @@ The `noncurrent_version_transition` object supports the following The `replication_configuration` object supports the following: +~> **NOTE:** See the `aws_s3_bucket_replication_configuration` resource documentation to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9333835f4d57..9870635e0692 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -107,6 +107,11 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + lifecycle { + ignore_changes = [ + replication_configuration + ] + } } aws_s3_bucket_replication_configuration replication { @@ -126,6 +131,17 @@ aws_s3_bucket_replication_configuration replication { ``` +~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. + +``` +lifecycle { + ignore_changes = [ + replication_configuration + ] +} +``` + + ## Argument Reference The following arguments are supported: @@ -142,6 +158,7 @@ The `rules` object supports the following: With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). * `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). 
* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). @@ -153,6 +170,10 @@ Replication configuration V1 supports filtering based on only the `prefix` attri ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +The `existing_object_replication` object supports the following: + +* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -161,15 +182,32 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). +* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). + +The `replication_time` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + +The `metrics` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. The `source_selection_criteria` object supports the following: +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between + replicas and source objects (documented below). + * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` in `destination` must be specified as well. +The `replica_modifications` object supports the following: + +* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. + The `sse_kms_encrypted_objects` object supports the following: -* `enabled` - (Required) Boolean which indicates if this criteria is enabled. +* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
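+
+For illustration only, a minimal sketch of how these two settings pair up; the `aws_s3_bucket.destination` and `aws_kms_key.replica` references are assumed to be defined elsewhere in the configuration:
+
+```
+source_selection_criteria {
+  sse_kms_encrypted_objects {
+    status = "Enabled"
+  }
+}
+
+destination {
+  # assumed destination bucket and replica KMS key, defined elsewhere
+  bucket             = aws_s3_bucket.destination.arn
+  replica_kms_key_id = aws_kms_key.replica.arn
+}
+```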
The `filter` object supports the following: From e4b87afcb83771ab5a636ac9cca76d9f75eb7515 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:09 -0700 Subject: [PATCH 073/304] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 4 ++-- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index 012e5f5bb750..e550fcad46da 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,11 +844,11 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = true +const isExistingObjectReplicationBlocked = false func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { - /* https://aws.amazon.com/blogs/storage/replicating-existing-objects-between-s3-buckets/ + /* https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. Once that request is approved, this can be unblocked for testing. */ return diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 9870635e0692..2511d95454bb 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -172,6 +172,8 @@ Replication configuration V1 supports filtering based on only the `prefix` attri The `existing_object_replication` object supports the following: +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) + * `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. 
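+
+As a hedged sketch, enabling it inside a rule (assuming the request to AWS Support has been approved and that an `aws_s3_bucket.destination` bucket exists) might look like:
+
+```
+rules {
+  id     = "example"
+  status = "Enabled"
+
+  existing_object_replication {
+    status = "Enabled"
+  }
+
+  destination {
+    # assumed destination bucket, defined elsewhere
+    bucket = aws_s3_bucket.destination.arn
+  }
+}
+```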
The `destination` object supports the following: From 8a024bd3394bc9b9d7e2bfee9a571383d3bb5113 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 10 Sep 2021 13:37:56 -0700 Subject: [PATCH 074/304] Documentation updates for existing object replication --- aws/resource_aws_s3_bucket_replication_configuration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index e550fcad46da..fed215ad3c14 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -844,7 +844,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { }) } -const isExistingObjectReplicationBlocked = false +const isExistingObjectReplicationBlocked = true func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { if isExistingObjectReplicationBlocked { From 64d2ae8d6168c103f89dd4036d2a29ae783cc119 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Mon, 13 Sep 2021 15:01:45 -0700 Subject: [PATCH 075/304] adding headers and source examples to documentation --- ...et_replication_configuration.html.markdown | 162 +++++++++++++++--- 1 file changed, 139 insertions(+), 23 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 2511d95454bb..091286c63abb 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -131,6 +131,76 @@ aws_s3_bucket_replication_configuration replication { ``` +### Bi-Directional Replication + +``` + +... + +resource "aws_s3_bucket" "east" { + bucket = "tf-test-bucket-east-12345" + + versioning { + enabled = true + } + + lifecycle { + ignore_changes = [ + replication_configuration + ] + } +} + +resource "aws_s3_bucket" "west" { + provider = west + bucket = "tf-test-bucket-west-12345" + + versioning { + enabled = true + } + + lifecycle { + ignore_changes = [ + replication_configuration + ] + } +} + +aws_s3_bucket_replication_configuration "east_to_west" { + role = aws_iam_role.east_replication.arn + bucket = aws_s3_bucket.east.id + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.west.arn + storage_class = "STANDARD" + } + } +} + +aws_s3_bucket_replication_configuration "west_to_east" { + role = aws_iam_role.west_replication.arn + bucket = aws_s3_bucket.west.id + rules { + id = "foobar" + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.east.arn + storage_class = "STANDARD" + } + } +} +``` + +## Usage Notes + +This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. + ~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. 
``` @@ -140,11 +210,17 @@ lifecycle { ] } ``` +The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +* `replica_modifications` - Added to the `source_selection_criteria` configuration +* `metrics` - Added to the `destination` configuration +* `replication_time` - Added to the `destination` configuration +* `existing_object_replication` - Added to the replication rule + +Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference -The following arguments are supported: +## Argument Reference The `replication_configuration` object supports the following: @@ -152,30 +228,42 @@ The `replication_configuration` object supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication (documented below). -The `rules` object supports the following: +### Rules + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. ~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. + +The `rules` object supports the following: + With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). -* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). * `destination` - (Required) Specifies the destination for the rule (documented below). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). -* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled. 
+* `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +### Rule Existing Object Replication + +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) +``` +existing_object_replication { + status = "Enabled" +} +``` +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) Whether the existing objects should be replicated. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -187,29 +275,61 @@ The `destination` object supports the following: * `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). * `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +### Replication Time Control + +``` +replication_time { + status = "Enabled" + time { + minutes = 15 + } +} +``` + The `replication_time` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. + +### Metrics + +``` +metrics { + status = "Enabled" + event_threshold { + minutes = 15 + } +} +``` The `metrics` object supports the following: -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. + +### Source Selection Criteria The `source_selection_criteria` object supports the following: +``` +source_selection_criteria { + replica_modification { + status = "Enabled" + } + sse_kms_encrypted_objects { + status = "Enabled" + } +} +``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects (documented below). - -* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). 
If specified, `replica_kms_key_id` - in `destination` must be specified as well. - -The `replica_modifications` object supports the following: + replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -* `status` - (Required) The status of the Replica Modifications sync. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` + in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -The `sse_kms_encrypted_objects` object supports the following: + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `status` - (Required) The status of the SSE KMS encryption. Either `Enabled` or `Disabled`. The object is ignored if status is not Enabled. +### Replication Rule Filter The `filter` object supports the following: @@ -217,10 +337,6 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. From 54585b3371adff762db6df0d8cf3370d68bef99c Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Tue, 14 Sep 2021 10:30:09 -0700 Subject: [PATCH 076/304] adding internal documentation links, cleanup --- ...et_replication_configuration.html.markdown | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 091286c63abb..099b3261ab78 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -8,7 +8,7 @@ description: |- # Resource: aws_s3_bucket_replication_configuration -Provides a configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) for existing s3 buckets. +Provides an independent configuration resource for S3 bucket [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). ## Example Usage @@ -199,9 +199,9 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are available in the `replication_configuration` block of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` block. Faliure to add the lifecycle configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
-~> **NOTE:** To avoid conflicts always add the following lifecycle block to the `aws_s3_bucket` resource of the source bucket. +~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. ``` lifecycle { @@ -210,25 +210,25 @@ lifecycle { ] } ``` -The `aws_s3_bucket_replication_configuration` resource adds the following features that are not available in the `aws_s3_bucket` resource: +The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: -* `replica_modifications` - Added to the `source_selection_criteria` configuration -* `metrics` - Added to the `destination` configuration -* `replication_time` - Added to the `destination` configuration -* `existing_object_replication` - Added to the replication rule +* `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) +* `metrics` - Added to the `destination` configuration object [documented below](#metrics) +* `replication_time` - Added to the `destination` configuration object [documented below](#replication_time) +* `existing_object_replication` - Added to the replication rule object [documented below](#existing_object_replication) Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) ## Argument Reference -The `replication_configuration` object supports the following: +The `replication_configuration` resource supports the following: -* `bucket` - (Required) The ARN of the source S3 bucket where you want Amazon S3 to monitor. +* `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rules` - (Required) Specifies the rules managing the replication (documented below). +* `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### Rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -236,20 +236,19 @@ The `replication_configuration` object supports the following: The `rules` object supports the following: -With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. -Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. -* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations (documented below). +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). 
* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). -* `destination` - (Required) Specifies the destination for the rule (documented below). -* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies (documented below). +* `destination` - (Required) Specifies the destination for the rule [documented below](#destination). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. * `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. -* `source_selection_criteria` - (Optional) Specifies special object selection criteria (documented below). +* `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### Rule Existing Object Replication +### exiting_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) @@ -263,7 +262,7 @@ existing_object_replication { * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. -### Destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -272,10 +271,10 @@ The `destination` object supports the following: `sse_kms_encrypted_objects` source selection criteria. * `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. * `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. -* `replication_time` - (Optional) Must be used in conjunction with `metrics` (documented below). -* `metrics` - (Optional) Must be used in conjunction with `replication_time` (documented below). +* `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). +* `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). -### Replication Time Control +### replication_time ``` replication_time { @@ -291,7 +290,7 @@ The `replication_time` object supports the following: * `status` - (Required) The status of the Replication Time Control. 
Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. -### Metrics +### metrics ``` metrics { @@ -307,7 +306,7 @@ The `metrics` object supports the following: * `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. -### Source Selection Criteria +### source_selection_criteria The `source_selection_criteria` object supports the following: ``` @@ -322,14 +321,14 @@ source_selection_criteria { ``` * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` is required either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. 
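+
+A minimal sketch of the replica-metadata sync on its own, using the plural `replica_modifications` argument name from the bullet above:
+
+```
+source_selection_criteria {
+  replica_modifications {
+    status = "Enabled"
+  }
+}
+```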
-### Replication Rule Filter +### filter The `filter` object supports the following: From 80b6e26a05b55e68ca8336313e422a65b71a0b15 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 15 Sep 2021 16:34:21 -0700 Subject: [PATCH 077/304] Align delete_marker_replication with other objects --- ...aws_s3_bucket_replication_configuration.go | 50 +++++++++++-------- ...3_bucket_replication_configuration_test.go | 12 +++-- internal/service/s3/bucket.go | 2 +- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 8cfffba4044e..581ac90653f7 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -96,13 +96,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.MetricsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), }, "event_threshold": { Type: schema.TypeList, @@ -126,13 +126,13 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { Type: schema.TypeList, Optional: true, MinItems: 1, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicationTimeStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), }, "time": { Type: schema.TypeList, @@ -172,7 +172,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.SseKmsEncryptedObjectsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), }, }, }, @@ -187,7 +187,7 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ReplicaModificationsStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), }, }, }, @@ -235,15 +235,25 @@ func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { "status": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{s3.ExistingObjectReplicationStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), }, }, }, }, - "delete_marker_replication_status": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + "delete_marker_replication": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + }, + }, + }, }, }, }, @@ -391,7 +401,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met if v.ExistingObjectReplication != nil { status := make(map[string]interface{}) status["status"] = 
aws.StringValue(v.ExistingObjectReplication.Status) - t["existing_object_replication"] = status + t["existing_object_replication"] = []interface{}{status} } if v.ID != nil { @@ -431,8 +441,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } t["filter"] = []interface{}{m} - if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil && aws.StringValue(v.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { - t["delete_marker_replication_status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil { + status := make(map[string]interface{}) + status["status"] = aws.StringValue(v.DeleteMarkerReplication.Status) + t["delete_marker_replication"] = []interface{}{status} } } @@ -573,13 +585,11 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) } - if dmr, ok := rr["delete_marker_replication_status"].(string); ok && dmr != "" { - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(dmr), - } - } else { + dmr, ok := rr["delete_marker_replication"].([]interface{}) + if ok && len(dmr) > 0 { + s := dmr[0].(map[string]interface{}) rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s["status"].(string)), } } } else { diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index fed215ad3c14..c5c6879cf80b 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1479,7 +1479,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "foo" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1628,7 +1630,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "testprefix" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1701,7 +1705,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" } - delete_marker_replication_status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 5bc4db22b414..b52ccb998da7 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2662,7 +2662,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } From d198d93187e7a7c4f0d12e2042fb0b2da9e7cf9f Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 08:31:08 -0700 Subject: [PATCH 078/304] Update delete_marker replication docs to reflect changes --- ...ket_replication_configuration.html.markdown | 18 +++++++++++++++++- 
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown
index 099b3261ab78..44d46f1c4d09 100644
--- a/website/docs/r/s3_bucket_replication_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown
@@ -239,7 +239,7 @@ The `rules` object supports the following:
 With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration.
 
 * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication).
-* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `"Enabled"`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used).
+* `delete_marker_replication` - (Optional) Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when `filter` is used) [documented below](#delete_marker_replication).
 * `destination` - (Required) Specifies the destination for the rule [documented below](#destination).
 * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter).
 * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length.
@@ -262,6 +262,22 @@ existing_object_replication {
 
 * `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`.
 
+### delete_marker_replication
+
+~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`.
+
+~> **NOTE:** This argument is only available with V2 replication configurations.
+
+The `delete_marker_replication` object supports the following:
+
+```
+delete_marker_replication {
+  status = "Enabled"
+}
+```
+* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`.
+
+
 ### destination
 
 The `destination` object supports the following:

From e5d3d0625a10dee1a815fae3ddef7c9c6ed13cfe Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Thu, 16 Sep 2021 13:59:37 -0700
Subject: [PATCH 079/304] Documentation adjustments

fix typos
shift notes to be above examples
remove unnecessary words
expand on some attribute concepts that may be obscure
---
 website/docs/r/s3_bucket.html.markdown        |  6 +--
 ...et_replication_configuration.html.markdown | 51 ++++++++++---------
 2 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown
index 6d243a9dccf0..57bb495ebd70 100644
--- a/website/docs/r/s3_bucket.html.markdown
+++ b/website/docs/r/s3_bucket.html.markdown
@@ -178,6 +178,8 @@ resource "aws_s3_bucket" "versioning_bucket" {
 
 ### Using replication configuration
 
+~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features.
+ ```terraform provider "aws" { region = "eu-west-1" @@ -301,10 +303,6 @@ resource "aws_s3_bucket" "source" { } ``` -~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. - - - ### Enable Default Server Side Encryption ```terraform diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 44d46f1c4d09..de512bcb34d4 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,9 +133,9 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -``` +```terraform -... +#... resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -199,17 +199,18 @@ aws_s3_bucket_replication_configuration "west_to_east" { ## Usage Notes -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. - ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -``` +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. + +```terraform lifecycle { ignore_changes = [ replication_configuration ] } ``` + The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: * `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) @@ -248,51 +249,53 @@ With the `filter` attribute, you can specify object filters based on the object * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### exiting_object_replication +### existing_object_replication ~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) The `existing_object_replication` object supports the following: -``` +```terraform existing_object_replication { status = "Enabled" } ``` -* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. ### delete_marker_replication -~> **NOTE:** This configuration format differes from that of `aws_s3_bucket`. 
+~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. ~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: -``` +```terraform delete_marker_replication { status = "Enabled" } ``` -* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. ### destination The `destination` object supports the following: -* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. -* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. * `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with `sse_kms_encrypted_objects` source selection criteria. -* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. -* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. * `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). * `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). ### replication_time -``` +```terraform replication_time { status = "Enabled" time { @@ -303,12 +306,12 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. 
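+
+Because the two blocks must be enabled together, a destination using S3 Replication Time Control might combine them as follows; this is a sketch reusing the fifteen-minute values from the examples above, with an assumed `aws_s3_bucket.destination` bucket:
+
+```terraform
+destination {
+  # assumed destination bucket, defined elsewhere
+  bucket = aws_s3_bucket.destination.arn
+
+  replication_time {
+    status = "Enabled"
+    time {
+      minutes = 15
+    }
+  }
+
+  metrics {
+    status = "Enabled"
+    event_threshold {
+      minutes = 15
+    }
+  }
+}
+```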
### metrics -``` +```terraform metrics { status = "Enabled" event_threshold { @@ -319,13 +322,14 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria The `source_selection_criteria` object supports the following: -``` + +```terraform source_selection_criteria { replica_modification { status = "Enabled" @@ -336,13 +340,14 @@ source_selection_criteria { } ``` + ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. + * `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. The object is ignored if status is not `"Enabled"`. + in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`. - ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. ### filter From 6c15db52797a9b7561a1448855be36a6727a8345 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:15:32 -0700 Subject: [PATCH 080/304] linting --- ...s3_bucket_replication_configuration.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index de512bcb34d4..ec38b8c4d2d7 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -229,7 +229,7 @@ The `replication_configuration` resource supports the following: * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). -### rules +### rules ~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. @@ -268,7 +268,7 @@ existing_object_replication { ~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. -~> **NOTE:** This argument is only available with V2 replication configurations. +~> **NOTE:** This argument is only available with V2 replication configurations. The `delete_marker_replication` object supports the following: @@ -281,7 +281,7 @@ delete_marker_replication { * `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. 
-### destination +### destination The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. @@ -306,7 +306,7 @@ replication_time { The `replication_time` object supports the following: -* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. * `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. ### metrics @@ -322,7 +322,7 @@ metrics { The `metrics` object supports the following: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. * `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. ### source_selection_criteria @@ -342,7 +342,7 @@ source_selection_criteria { ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. -* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between +* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. * `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` From 1a887de980d1619b9eedb1a1421ce2c2866b2392 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 14:41:01 -0700 Subject: [PATCH 081/304] linting/fmt --- aws/resource_aws_s3_bucket_replication_configuration.go | 2 +- .../docs/r/s3_bucket_replication_configuration.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 581ac90653f7..9fed71889cfa 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -347,7 +347,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met r := replication.ReplicationConfiguration // set role if r.Role != nil && aws.StringValue(r.Role) != "" { - d.Set("role", aws.StringValue(r.Role)) + d.Set("role", r.Role) } rules := make([]interface{}, 0, len(r.Rules)) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ec38b8c4d2d7..c119ce2bcd0d 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. 
See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Argument Reference +## Attributes Reference The `replication_configuration` resource supports the following: From 01bd0b247d9bca626059290905ae1901784ae552 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 16 Sep 2021 15:00:32 -0700 Subject: [PATCH 082/304] adding missing attribute reference to documentation --- .../r/s3_bucket_replication_configuration.html.markdown | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index c119ce2bcd0d..eea1a86a844e 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -221,7 +221,7 @@ The `aws_s3_bucket_replication_configuration` resource provides the following fe Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) -## Attributes Reference +## Argument Reference The `replication_configuration` resource supports the following: @@ -357,6 +357,12 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* id - Resource id is the s3 source bucket name. + ## Import S3 bucket replication configuration can be imported using the `bucket`, e.g. From 0d92fba850a5f7e39f01977ae8e77540eee0d800 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Thu, 23 Sep 2021 07:46:36 -0700 Subject: [PATCH 083/304] use untyped code blocks until new resource is merged to validate --- ...ucket_replication_configuration.html.markdown | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index eea1a86a844e..ee5ce245b7bb 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -14,7 +14,7 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration -```terraform +``` provider "aws" { region = "eu-west-1" } @@ -133,7 +133,7 @@ aws_s3_bucket_replication_configuration replication { ### Bi-Directional Replication -```terraform +``` #... @@ -203,7 +203,7 @@ aws_s3_bucket_replication_configuration "west_to_east" { This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. 
-```terraform +``` lifecycle { ignore_changes = [ replication_configuration @@ -255,7 +255,7 @@ With the `filter` attribute, you can specify object filters based on the object The `existing_object_replication` object supports the following: -```terraform +``` existing_object_replication { status = "Enabled" } @@ -272,7 +272,7 @@ existing_object_replication { The `delete_marker_replication` object supports the following: -```terraform +``` delete_marker_replication { status = "Enabled" } @@ -295,7 +295,7 @@ The `destination` object supports the following: ### replication_time -```terraform +``` replication_time { status = "Enabled" time { @@ -311,7 +311,7 @@ The `replication_time` object supports the following: ### metrics -```terraform +``` metrics { status = "Enabled" event_threshold { @@ -329,7 +329,7 @@ The `metrics` object supports the following: The `source_selection_criteria` object supports the following: -```terraform +``` source_selection_criteria { replica_modification { status = "Enabled" From e4778325052a61e60a52adb0a6d78339d7931dab Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:49:58 -0400 Subject: [PATCH 084/304] address linter-related errors --- ...3_bucket_replication_configuration_test.go | 14 ++++++-------- ...et_replication_configuration.html.markdown | 19 +++++++++---------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index c5c6879cf80b..d8274ed6cf2c 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -1480,8 +1480,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1631,8 +1631,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1706,8 +1706,8 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } delete_marker_replication { - status = "Enabled" - } + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn @@ -1715,7 +1715,5 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } } - - `, rName, rNameDestination, rInt) } diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ee5ce245b7bb..90e3faf8de3c 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -114,9 +114,9 @@ resource "aws_s3_bucket" "source" { } } -aws_s3_bucket_replication_configuration replication { +resource "aws_s3_bucket_replication_configuration" "replication" { role = aws_iam_role.replication.arn - bucket = aws_s3_bucket.source.id + bucket = aws_s3_bucket.source.id rules { id = "foobar" prefix = "foo" @@ -128,14 +128,13 @@ aws_s3_bucket_replication_configuration replication { } } } - ``` ### Bi-Directional Replication ``` -#... +# ... other configuration ... 
resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" @@ -166,9 +165,9 @@ resource "aws_s3_bucket" "west" { } } -aws_s3_bucket_replication_configuration "east_to_west" { +resource "aws_s3_bucket_replication_configuration" "east_to_west" { role = aws_iam_role.east_replication.arn - bucket = aws_s3_bucket.east.id + bucket = aws_s3_bucket.east.id rules { id = "foobar" prefix = "foo" @@ -181,9 +180,9 @@ aws_s3_bucket_replication_configuration "east_to_west" { } } -aws_s3_bucket_replication_configuration "west_to_east" { +resource "aws_s3_bucket_replication_configuration" "west_to_east" { role = aws_iam_role.west_replication.arn - bucket = aws_s3_bucket.west.id + bucket = aws_s3_bucket.west.id rules { id = "foobar" prefix = "foo" @@ -201,7 +200,7 @@ aws_s3_bucket_replication_configuration "west_to_east" { ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Faliure to add the `lifecycle` configuation to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. ``` lifecycle { @@ -357,7 +356,7 @@ The `filter` object supports the following: * `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. -## Attribute Reference +## Attributes Reference In addition to all arguments above, the following attributes are exported: From 3622ba5120cc8df8b99d260db6cc23de5b3b53f2 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 09:52:12 -0400 Subject: [PATCH 085/304] Update CHANGELOG for #20777 --- .changelog/20777.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/20777.txt diff --git a/.changelog/20777.txt b/.changelog/20777.txt new file mode 100644 index 000000000000..75e556fa77e9 --- /dev/null +++ b/.changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_s3_bucket_replication_configuration +``` From a241e956e5aa05eed45fe9de8efce9f873c1cafe Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 23 Sep 2021 10:17:16 -0400 Subject: [PATCH 086/304] forgo syntax highlighting in short snippet code blocks in documentation --- .../docs/r/s3_bucket_replication_configuration.html.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 90e3faf8de3c..8de6f01c14ee 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -133,7 +133,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication ``` - # ... other configuration ... 
resource "aws_s3_bucket" "east" { @@ -366,6 +365,6 @@ In addition to all arguments above, the following attributes are exported: S3 bucket replication configuration can be imported using the `bucket`, e.g. -``` +```sh $ terraform import aws_s3_bucket_replication_configuration.replication bucket-name ``` From b8be20a184a2f110778eae6defcfbd85a9db6f68 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Fri, 24 Sep 2021 10:40:10 -0700 Subject: [PATCH 087/304] Revert key renamed in error --- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index b52ccb998da7..5bc4db22b414 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2662,7 +2662,7 @@ func rulesHash(v interface{}) int { if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - if v, ok := m["delete_marker_replication"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { + if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } } From 9614e741543169ada01f7d20f3be960eecd9b362 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sun, 26 Sep 2021 13:36:06 -0700 Subject: [PATCH 088/304] Add logic for explicit delete Include delete logic for replication configuration Adding test for delete logic --- ...aws_s3_bucket_replication_configuration.go | 13 ++++ ...3_bucket_replication_configuration_test.go | 62 +++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/aws/resource_aws_s3_bucket_replication_configuration.go index 9fed71889cfa..42cbce8d6efd 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration.go +++ b/aws/resource_aws_s3_bucket_replication_configuration.go @@ -628,6 +628,19 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) + + dbri := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(bucket), + } + + _, err := s3conn.DeleteBucketReplication(dbri) + if err != nil { + return fmt.Errorf("Error removing S3 bucket replication: %s", err) + } return nil } diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/aws/resource_aws_s3_bucket_replication_configuration_test.go index d8274ed6cf2c..ff5181375500 100644 --- a/aws/resource_aws_s3_bucket_replication_configuration_test.go +++ b/aws/resource_aws_s3_bucket_replication_configuration_test.go @@ -906,6 +906,68 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) }) } +func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { + rInt := acctest.RandInt() + partition := testAccGetPartition() + iamRoleResourceName := "aws_iam_role.role" + resourceName := "aws_s3_bucket_replication_configuration.replication" + + testDeleted := func(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[r] + if ok { + return fmt.Errorf("Replication resource configuration %q should have been deleted.", r) + } + return nil + } + } + + // record the initialized providers so that we can use them to check for the 
instances in each region
+	var providers []*schema.Provider
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck: func() {
+			testAccPreCheck(t)
+			testAccMultipleRegionPreCheck(t, 2)
+		},
+		ErrorCheck:        testAccErrorCheck(t, s3.EndpointsID),
+		ProviderFactories: testAccProviderFactoriesAlternate(&providers),
+		CheckDestroy:      testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"),
+					resource.TestCheckResourceAttr(resourceName, "rules.#", "1"),
+					testAccCheckAWSS3BucketReplicationRules(
+						resourceName,
+						[]*s3.ReplicationRule{
+							{
+								ID: aws.String("foobar"),
+								Destination: &s3.Destination{
+									Bucket:       aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)),
+									StorageClass: aws.String(s3.StorageClassStandard),
+								},
+								Prefix: aws.String("foo"),
+								Status: aws.String(s3.ReplicationRuleStatusEnabled),
+							},
+						},
+					),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccAWSS3BucketReplicationConfigBasic(rInt),
+				Check:  resource.ComposeTestCheckFunc(testDeleted(resourceName)),
+			},
+		},
+	})
+}
+
 func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs := s.RootModule().Resources[n]

From 821a2b4e9009e6a9ee48d5db6a28d813014fb018 Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Tue, 2 Nov 2021 09:41:45 -0700
Subject: [PATCH 089/304] move source into service/s3 dir tracking upstream
 changes

---
 .../service/s3/bucket_replication_configuration.go       | 0
 .../service/s3/bucket_replication_configuration_test.go  | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename aws/resource_aws_s3_bucket_replication_configuration.go => internal/service/s3/bucket_replication_configuration.go (100%)
 rename aws/resource_aws_s3_bucket_replication_configuration_test.go => internal/service/s3/bucket_replication_configuration_test.go (100%)

diff --git a/aws/resource_aws_s3_bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go
similarity index 100%
rename from aws/resource_aws_s3_bucket_replication_configuration.go
rename to internal/service/s3/bucket_replication_configuration.go
diff --git a/aws/resource_aws_s3_bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go
similarity index 100%
rename from aws/resource_aws_s3_bucket_replication_configuration_test.go
rename to internal/service/s3/bucket_replication_configuration_test.go

From a367de4692b7f8435dfe7a112d40ecd6a0779587 Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Tue, 2 Nov 2021 09:51:18 -0700
Subject: [PATCH 090/304] tracking upstream changes

---
 internal/provider/provider.go                 | 21 ++++++++++----------
 .../s3/bucket_replication_configuration.go    |  4 ++--
 .../bucket_replication_configuration_test.go  |  2 +-
 3 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 7471764fb8fd..c1ea5c8d3370 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -1459,16 +1459,17 @@ func Provider() *schema.Provider {
 		"aws_route53_resolver_rule":             route53resolver.ResourceRule(),
 		"aws_route53_resolver_rule_association": route53resolver.ResourceRuleAssociation(),
 
-
"aws_s3_bucket": s3.ResourceBucket(), - "aws_s3_bucket_analytics_configuration": s3.ResourceBucketAnalyticsConfiguration(), - "aws_s3_bucket_inventory": s3.ResourceBucketInventory(), - "aws_s3_bucket_metric": s3.ResourceBucketMetric(), - "aws_s3_bucket_notification": s3.ResourceBucketNotification(), - "aws_s3_bucket_object": s3.ResourceBucketObject(), - "aws_s3_bucket_ownership_controls": s3.ResourceBucketOwnershipControls(), - "aws_s3_bucket_policy": s3.ResourceBucketPolicy(), - "aws_s3_bucket_public_access_block": s3.ResourceBucketPublicAccessBlock(), - "aws_s3_object_copy": s3.ResourceObjectCopy(), + "aws_s3_bucket": s3.ResourceBucket(), + "aws_s3_bucket_analytics_configuration": s3.ResourceBucketAnalyticsConfiguration(), + "aws_s3_bucket_inventory": s3.ResourceBucketInventory(), + "aws_s3_bucket_metric": s3.ResourceBucketMetric(), + "aws_s3_bucket_notification": s3.ResourceBucketNotification(), + "aws_s3_bucket_object": s3.ResourceBucketObject(), + "aws_s3_bucket_ownership_controls": s3.ResourceBucketOwnershipControls(), + "aws_s3_bucket_policy": s3.ResourceBucketPolicy(), + "aws_s3_bucket_public_access_block": s3.ResourceBucketPublicAccessBlock(), + "aws_s3_bucket_replication_configuration": s3.ResourceBucketReplicationConfiguration(), + "aws_s3_object_copy": s3.ResourceObjectCopy(), "aws_s3_access_point": s3control.ResourceAccessPoint(), "aws_s3_account_public_access_block": s3control.ResourceAccountPublicAccessBlock(), diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 42cbce8d6efd..4c6937093410 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -1,4 +1,4 @@ -package aws +package s3 import ( "errors" @@ -18,7 +18,7 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) -func resourceAwsS3BucketReplicationConfiguration() *schema.Resource { +func ResourceBucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ Create: resourceAwsS3BucketReplicationConfigurationPut, Read: resourceAwsS3BucketReplicationConfigurationRead, diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index ff5181375500..28bc9f8e5024 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1,4 +1,4 @@ -package aws +package s3 import ( "fmt" From 37352d0d2cfaa4608c35610a201d0df9aa93ec79 Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Wed, 3 Nov 2021 11:24:11 -0700 Subject: [PATCH 091/304] tracking upstream changes --- aws/provider.go | 1702 ----------------- .../s3/bucket_replication_configuration.go | 6 +- 2 files changed, 3 insertions(+), 1705 deletions(-) delete mode 100644 aws/provider.go diff --git a/aws/provider.go b/aws/provider.go deleted file mode 100644 index 5641b36adb4e..000000000000 --- a/aws/provider.go +++ /dev/null @@ -1,1702 +0,0 @@ -package aws - -import ( - "log" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/mutexkv" -) - -// Provider returns a *schema.Provider. 
-func Provider() *schema.Provider { - // TODO: Move the validation to this, requires conditional schemas - // TODO: Move the configuration to this, requires validation - - // The actual provider - provider := &schema.Provider{ - Schema: map[string]*schema.Schema{ - "access_key": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["access_key"], - }, - - "secret_key": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["secret_key"], - }, - - "profile": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["profile"], - }, - - "assume_role": assumeRoleSchema(), - - "shared_credentials_file": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["shared_credentials_file"], - }, - - "token": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["token"], - }, - - "region": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", - }, nil), - Description: descriptions["region"], - InputDefault: "us-east-1", // lintignore:AWSAT003 - }, - - "max_retries": { - Type: schema.TypeInt, - Optional: true, - Default: 25, - Description: descriptions["max_retries"], - }, - - "allowed_account_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"forbidden_account_ids"}, - Set: schema.HashString, - }, - - "forbidden_account_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"allowed_account_ids"}, - Set: schema.HashString, - }, - - "default_tags": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Configuration block with settings to default resource tags across all resources.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Resource tags to default across all resources", - }, - }, - }, - }, - - "endpoints": endpointsSchema(), - - "ignore_tags": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Configuration block with settings to ignore resource tags across all resources.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "keys": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Description: "Resource tag keys to ignore across all resources.", - }, - "key_prefixes": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Description: "Resource tag key prefixes to ignore across all resources.", - }, - }, - }, - }, - - "insecure": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["insecure"], - }, - - "skip_credentials_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_credentials_validation"], - }, - - "skip_get_ec2_platforms": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_get_ec2_platforms"], - }, - - "skip_region_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_region_validation"], - }, - - "skip_requesting_account_id": { - Type: schema.TypeBool, - Optional: 
true, - Default: false, - Description: descriptions["skip_requesting_account_id"], - }, - - "skip_metadata_api_check": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_metadata_api_check"], - }, - - "s3_force_path_style": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["s3_force_path_style"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "aws_acm_certificate": dataSourceAwsAcmCertificate(), - "aws_acmpca_certificate_authority": dataSourceAwsAcmpcaCertificateAuthority(), - "aws_acmpca_certificate": dataSourceAwsAcmpcaCertificate(), - "aws_ami": dataSourceAwsAmi(), - "aws_ami_ids": dataSourceAwsAmiIds(), - "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), - "aws_api_gateway_domain_name": dataSourceAwsApiGatewayDomainName(), - "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), - "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), - "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), - "aws_apigatewayv2_api": dataSourceAwsApiGatewayV2Api(), - "aws_apigatewayv2_apis": dataSourceAwsApiGatewayV2Apis(), - "aws_appmesh_mesh": dataSourceAwsAppmeshMesh(), - "aws_appmesh_virtual_service": dataSourceAwsAppmeshVirtualService(), - "aws_arn": dataSourceAwsArn(), - "aws_autoscaling_group": dataSourceAwsAutoscalingGroup(), - "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), - "aws_availability_zone": dataSourceAwsAvailabilityZone(), - "aws_availability_zones": dataSourceAwsAvailabilityZones(), - "aws_backup_plan": dataSourceAwsBackupPlan(), - "aws_backup_selection": dataSourceAwsBackupSelection(), - "aws_backup_vault": dataSourceAwsBackupVault(), - "aws_batch_compute_environment": dataSourceAwsBatchComputeEnvironment(), - "aws_batch_job_queue": dataSourceAwsBatchJobQueue(), - "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), - "aws_caller_identity": dataSourceAwsCallerIdentity(), - "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), - "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), - "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), - "aws_cloudformation_type": dataSourceAwsCloudFormationType(), - "aws_cloudfront_cache_policy": dataSourceAwsCloudFrontCachePolicy(), - "aws_cloudfront_distribution": dataSourceAwsCloudFrontDistribution(), - "aws_cloudfront_function": dataSourceAwsCloudFrontFunction(), - "aws_cloudfront_origin_request_policy": dataSourceAwsCloudFrontOriginRequestPolicy(), - "aws_cloudhsm_v2_cluster": dataSourceCloudHsmV2Cluster(), - "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), - "aws_cloudwatch_event_connection": dataSourceAwsCloudwatchEventConnection(), - "aws_cloudwatch_event_source": dataSourceAwsCloudWatchEventSource(), - "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), - "aws_codeartifact_authorization_token": dataSourceAwsCodeArtifactAuthorizationToken(), - "aws_codeartifact_repository_endpoint": dataSourceAwsCodeArtifactRepositoryEndpoint(), - "aws_cognito_user_pools": dataSourceAwsCognitoUserPools(), - "aws_codecommit_repository": dataSourceAwsCodeCommitRepository(), - "aws_codestarconnections_connection": dataSourceAwsCodeStarConnectionsConnection(), - "aws_cur_report_definition": dataSourceAwsCurReportDefinition(), - "aws_default_tags": dataSourceAwsDefaultTags(), - "aws_db_cluster_snapshot": dataSourceAwsDbClusterSnapshot(), - "aws_db_event_categories": dataSourceAwsDbEventCategories(), - "aws_db_instance": 
dataSourceAwsDbInstance(), - "aws_db_snapshot": dataSourceAwsDbSnapshot(), - "aws_db_subnet_group": dataSourceAwsDbSubnetGroup(), - "aws_directory_service_directory": dataSourceAwsDirectoryServiceDirectory(), - "aws_docdb_engine_version": dataSourceAwsDocdbEngineVersion(), - "aws_docdb_orderable_db_instance": dataSourceAwsDocdbOrderableDbInstance(), - "aws_dx_gateway": dataSourceAwsDxGateway(), - "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), - "aws_ebs_default_kms_key": dataSourceAwsEbsDefaultKmsKey(), - "aws_ebs_encryption_by_default": dataSourceAwsEbsEncryptionByDefault(), - "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), - "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), - "aws_ebs_volume": dataSourceAwsEbsVolume(), - "aws_ebs_volumes": dataSourceAwsEbsVolumes(), - "aws_ec2_coip_pool": dataSourceAwsEc2CoipPool(), - "aws_ec2_coip_pools": dataSourceAwsEc2CoipPools(), - "aws_ec2_instance_type": dataSourceAwsEc2InstanceType(), - "aws_ec2_instance_type_offering": dataSourceAwsEc2InstanceTypeOffering(), - "aws_ec2_instance_type_offerings": dataSourceAwsEc2InstanceTypeOfferings(), - "aws_ec2_local_gateway": dataSourceAwsEc2LocalGateway(), - "aws_ec2_local_gateways": dataSourceAwsEc2LocalGateways(), - "aws_ec2_local_gateway_route_table": dataSourceAwsEc2LocalGatewayRouteTable(), - "aws_ec2_local_gateway_route_tables": dataSourceAwsEc2LocalGatewayRouteTables(), - "aws_ec2_local_gateway_virtual_interface": dataSourceAwsEc2LocalGatewayVirtualInterface(), - "aws_ec2_local_gateway_virtual_interface_group": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroup(), - "aws_ec2_local_gateway_virtual_interface_groups": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroups(), - "aws_ec2_managed_prefix_list": dataSourceAwsEc2ManagedPrefixList(), - "aws_ec2_spot_price": dataSourceAwsEc2SpotPrice(), - "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), - "aws_ec2_transit_gateway_dx_gateway_attachment": dataSourceAwsEc2TransitGatewayDxGatewayAttachment(), - "aws_ec2_transit_gateway_peering_attachment": dataSourceAwsEc2TransitGatewayPeeringAttachment(), - "aws_ec2_transit_gateway_route_table": dataSourceAwsEc2TransitGatewayRouteTable(), - "aws_ec2_transit_gateway_route_tables": dataSourceAwsEc2TransitGatewayRouteTables(), - "aws_ec2_transit_gateway_vpc_attachment": dataSourceAwsEc2TransitGatewayVpcAttachment(), - "aws_ec2_transit_gateway_vpn_attachment": dataSourceAwsEc2TransitGatewayVpnAttachment(), - "aws_ecr_authorization_token": dataSourceAwsEcrAuthorizationToken(), - "aws_ecr_image": dataSourceAwsEcrImage(), - "aws_ecr_repository": dataSourceAwsEcrRepository(), - "aws_ecs_cluster": dataSourceAwsEcsCluster(), - "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), - "aws_ecs_service": dataSourceAwsEcsService(), - "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), - "aws_customer_gateway": dataSourceAwsCustomerGateway(), - "aws_efs_access_point": dataSourceAwsEfsAccessPoint(), - "aws_efs_access_points": dataSourceAwsEfsAccessPoints(), - "aws_efs_file_system": dataSourceAwsEfsFileSystem(), - "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), - "aws_eip": dataSourceAwsEip(), - "aws_eks_addon": dataSourceAwsEksAddon(), - "aws_eks_cluster": dataSourceAwsEksCluster(), - "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), - "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), - "aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), - "aws_elastic_beanstalk_solution_stack": 
dataSourceAwsElasticBeanstalkSolutionStack(), - "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), - "aws_elasticache_replication_group": dataSourceAwsElasticacheReplicationGroup(), - "aws_elasticache_user": dataSourceAwsElastiCacheUser(), - "aws_elasticsearch_domain": dataSourceAwsElasticSearchDomain(), - "aws_elb": dataSourceAwsElb(), - "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), - "aws_elb_service_account": dataSourceAwsElbServiceAccount(), - "aws_globalaccelerator_accelerator": dataSourceAwsGlobalAcceleratorAccelerator(), - "aws_glue_connection": dataSourceAwsGlueConnection(), - "aws_glue_data_catalog_encryption_settings": dataSourceAwsGlueDataCatalogEncryptionSettings(), - "aws_glue_script": dataSourceAwsGlueScript(), - "aws_guardduty_detector": dataSourceAwsGuarddutyDetector(), - "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), - "aws_iam_group": dataSourceAwsIAMGroup(), - "aws_iam_instance_profile": dataSourceAwsIAMInstanceProfile(), - "aws_iam_policy": dataSourceAwsIAMPolicy(), - "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), - "aws_iam_role": dataSourceAwsIAMRole(), - "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), - "aws_iam_session_context": dataSourceAwsIAMSessionContext(), - "aws_iam_user": dataSourceAwsIAMUser(), - "aws_identitystore_group": dataSourceAwsIdentityStoreGroup(), - "aws_identitystore_user": dataSourceAwsIdentityStoreUser(), - "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), - "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), - "aws_imagebuilder_image": dataSourceAwsImageBuilderImage(), - "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), - "aws_imagebuilder_image_recipe": dataSourceAwsImageBuilderImageRecipe(), - "aws_imagebuilder_infrastructure_configuration": datasourceAwsImageBuilderInfrastructureConfiguration(), - "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), - "aws_instance": dataSourceAwsInstance(), - "aws_instances": dataSourceAwsInstances(), - "aws_internet_gateway": dataSourceAwsInternetGateway(), - "aws_iot_endpoint": dataSourceAwsIotEndpoint(), - "aws_ip_ranges": dataSourceAwsIPRanges(), - "aws_kinesis_stream": dataSourceAwsKinesisStream(), - "aws_kinesis_stream_consumer": dataSourceAwsKinesisStreamConsumer(), - "aws_kms_alias": dataSourceAwsKmsAlias(), - "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), - "aws_kms_key": dataSourceAwsKmsKey(), - "aws_kms_public_key": dataSourceAwsKmsPublicKey(), - "aws_kms_secret": dataSourceAwsKmsSecret(), - "aws_kms_secrets": dataSourceAwsKmsSecrets(), - "aws_lakeformation_data_lake_settings": dataSourceAwsLakeFormationDataLakeSettings(), - "aws_lakeformation_permissions": dataSourceAwsLakeFormationPermissions(), - "aws_lakeformation_resource": dataSourceAwsLakeFormationResource(), - "aws_lambda_alias": dataSourceAwsLambdaAlias(), - "aws_lambda_code_signing_config": dataSourceAwsLambdaCodeSigningConfig(), - "aws_lambda_function": dataSourceAwsLambdaFunction(), - "aws_lambda_invocation": dataSourceAwsLambdaInvocation(), - "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), - "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), - "aws_launch_template": dataSourceAwsLaunchTemplate(), - "aws_lex_bot_alias": dataSourceAwsLexBotAlias(), - "aws_lex_bot": dataSourceAwsLexBot(), - "aws_lex_intent": dataSourceAwsLexIntent(), - "aws_lex_slot_type": dataSourceAwsLexSlotType(), - "aws_mq_broker": dataSourceAwsMqBroker(), - 
"aws_msk_cluster": dataSourceAwsMskCluster(), - "aws_msk_configuration": dataSourceAwsMskConfiguration(), - "aws_nat_gateway": dataSourceAwsNatGateway(), - "aws_neptune_orderable_db_instance": dataSourceAwsNeptuneOrderableDbInstance(), - "aws_neptune_engine_version": dataSourceAwsNeptuneEngineVersion(), - "aws_network_acls": dataSourceAwsNetworkAcls(), - "aws_network_interface": dataSourceAwsNetworkInterface(), - "aws_network_interfaces": dataSourceAwsNetworkInterfaces(), - "aws_organizations_delegated_administrators": dataSourceAwsOrganizationsDelegatedAdministrators(), - "aws_organizations_delegated_services": dataSourceAwsOrganizationsDelegatedServices(), - "aws_organizations_organization": dataSourceAwsOrganizationsOrganization(), - "aws_organizations_organizational_units": dataSourceAwsOrganizationsOrganizationalUnits(), - "aws_outposts_outpost": dataSourceAwsOutpostsOutpost(), - "aws_outposts_outpost_instance_type": dataSourceAwsOutpostsOutpostInstanceType(), - "aws_outposts_outpost_instance_types": dataSourceAwsOutpostsOutpostInstanceTypes(), - "aws_outposts_outposts": dataSourceAwsOutpostsOutposts(), - "aws_outposts_site": dataSourceAwsOutpostsSite(), - "aws_outposts_sites": dataSourceAwsOutpostsSites(), - "aws_partition": dataSourceAwsPartition(), - "aws_prefix_list": dataSourceAwsPrefixList(), - "aws_pricing_product": dataSourceAwsPricingProduct(), - "aws_qldb_ledger": dataSourceAwsQLDBLedger(), - "aws_ram_resource_share": dataSourceAwsRamResourceShare(), - "aws_rds_certificate": dataSourceAwsRdsCertificate(), - "aws_rds_cluster": dataSourceAwsRdsCluster(), - "aws_rds_engine_version": dataSourceAwsRdsEngineVersion(), - "aws_rds_orderable_db_instance": dataSourceAwsRdsOrderableDbInstance(), - "aws_redshift_cluster": dataSourceAwsRedshiftCluster(), - "aws_redshift_orderable_cluster": dataSourceAwsRedshiftOrderableCluster(), - "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), - "aws_region": dataSourceAwsRegion(), - "aws_regions": dataSourceAwsRegions(), - "aws_resourcegroupstaggingapi_resources": dataSourceAwsResourceGroupsTaggingAPIResources(), - "aws_route": dataSourceAwsRoute(), - "aws_route_table": dataSourceAwsRouteTable(), - "aws_route_tables": dataSourceAwsRouteTables(), - "aws_route53_delegation_set": dataSourceAwsDelegationSet(), - "aws_route53_resolver_endpoint": dataSourceAwsRoute53ResolverEndpoint(), - "aws_route53_resolver_rule": dataSourceAwsRoute53ResolverRule(), - "aws_route53_resolver_rules": dataSourceAwsRoute53ResolverRules(), - "aws_route53_zone": dataSourceAwsRoute53Zone(), - "aws_s3_bucket": dataSourceAwsS3Bucket(), - "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), - "aws_s3_bucket_objects": dataSourceAwsS3BucketObjects(), - "aws_sagemaker_prebuilt_ecr_image": dataSourceAwsSageMakerPrebuiltECRImage(), - "aws_secretsmanager_secret": dataSourceAwsSecretsManagerSecret(), - "aws_secretsmanager_secret_rotation": dataSourceAwsSecretsManagerSecretRotation(), - "aws_secretsmanager_secret_version": dataSourceAwsSecretsManagerSecretVersion(), - "aws_servicecatalog_constraint": dataSourceAwsServiceCatalogConstraint(), - "aws_servicecatalog_launch_paths": dataSourceAwsServiceCatalogLaunchPaths(), - "aws_servicecatalog_portfolio_constraints": dataSourceAwsServiceCatalogPortfolioConstraints(), - "aws_servicecatalog_portfolio": dataSourceAwsServiceCatalogPortfolio(), - "aws_servicecatalog_product": dataSourceAwsServiceCatalogProduct(), - "aws_servicequotas_service": dataSourceAwsServiceQuotasService(), - "aws_servicequotas_service_quota": 
dataSourceAwsServiceQuotasServiceQuota(), - "aws_service_discovery_dns_namespace": dataSourceServiceDiscoveryDnsNamespace(), - "aws_sfn_activity": dataSourceAwsSfnActivity(), - "aws_sfn_state_machine": dataSourceAwsSfnStateMachine(), - "aws_signer_signing_job": dataSourceAwsSignerSigningJob(), - "aws_signer_signing_profile": dataSourceAwsSignerSigningProfile(), - "aws_sns_topic": dataSourceAwsSnsTopic(), - "aws_sqs_queue": dataSourceAwsSqsQueue(), - "aws_ssm_document": dataSourceAwsSsmDocument(), - "aws_ssm_parameter": dataSourceAwsSsmParameter(), - "aws_ssm_patch_baseline": dataSourceAwsSsmPatchBaseline(), - "aws_ssoadmin_instances": dataSourceAwsSsoAdminInstances(), - "aws_ssoadmin_permission_set": dataSourceAwsSsoAdminPermissionSet(), - "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), - "aws_subnet": dataSourceAwsSubnet(), - "aws_subnet_ids": dataSourceAwsSubnetIDs(), - "aws_transfer_server": dataSourceAwsTransferServer(), - "aws_vpcs": dataSourceAwsVpcs(), - "aws_security_group": dataSourceAwsSecurityGroup(), - "aws_security_groups": dataSourceAwsSecurityGroups(), - "aws_vpc": dataSourceAwsVpc(), - "aws_vpc_dhcp_options": dataSourceAwsVpcDhcpOptions(), - "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), - "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), - "aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), - "aws_vpc_peering_connections": dataSourceAwsVpcPeeringConnections(), - "aws_vpn_gateway": dataSourceAwsVpnGateway(), - "aws_waf_ipset": dataSourceAwsWafIpSet(), - "aws_waf_rule": dataSourceAwsWafRule(), - "aws_waf_rate_based_rule": dataSourceAwsWafRateBasedRule(), - "aws_waf_web_acl": dataSourceAwsWafWebAcl(), - "aws_wafregional_ipset": dataSourceAwsWafRegionalIpSet(), - "aws_wafregional_rule": dataSourceAwsWafRegionalRule(), - "aws_wafregional_rate_based_rule": dataSourceAwsWafRegionalRateBasedRule(), - "aws_wafregional_web_acl": dataSourceAwsWafRegionalWebAcl(), - "aws_wafv2_ip_set": dataSourceAwsWafv2IPSet(), - "aws_wafv2_regex_pattern_set": dataSourceAwsWafv2RegexPatternSet(), - "aws_wafv2_rule_group": dataSourceAwsWafv2RuleGroup(), - "aws_wafv2_web_acl": dataSourceAwsWafv2WebACL(), - "aws_workspaces_bundle": dataSourceAwsWorkspacesBundle(), - "aws_workspaces_directory": dataSourceAwsWorkspacesDirectory(), - "aws_workspaces_image": dataSourceAwsWorkspacesImage(), - "aws_workspaces_workspace": dataSourceAwsWorkspacesWorkspace(), - - // Adding the Aliases for the ALB -> LB Rename - "aws_lb": dataSourceAwsLb(), - "aws_alb": dataSourceAwsLb(), - "aws_lb_listener": dataSourceAwsLbListener(), - "aws_alb_listener": dataSourceAwsLbListener(), - "aws_lb_target_group": dataSourceAwsLbTargetGroup(), - "aws_alb_target_group": dataSourceAwsLbTargetGroup(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "aws_accessanalyzer_analyzer": resourceAwsAccessAnalyzerAnalyzer(), - "aws_acm_certificate": resourceAwsAcmCertificate(), - "aws_acm_certificate_validation": resourceAwsAcmCertificateValidation(), - "aws_acmpca_certificate_authority": resourceAwsAcmpcaCertificateAuthority(), - "aws_acmpca_certificate_authority_certificate": resourceAwsAcmpcaCertificateAuthorityCertificate(), - "aws_acmpca_certificate": resourceAwsAcmpcaCertificate(), - "aws_ami": resourceAwsAmi(), - "aws_ami_copy": resourceAwsAmiCopy(), - "aws_ami_from_instance": resourceAwsAmiFromInstance(), - "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), - "aws_amplify_app": resourceAwsAmplifyApp(), - "aws_amplify_backend_environment": 
resourceAwsAmplifyBackendEnvironment(), - "aws_amplify_branch": resourceAwsAmplifyBranch(), - "aws_amplify_domain_association": resourceAwsAmplifyDomainAssociation(), - "aws_amplify_webhook": resourceAwsAmplifyWebhook(), - "aws_api_gateway_account": resourceAwsApiGatewayAccount(), - "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), - "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), - "aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(), - "aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(), - "aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(), - "aws_api_gateway_documentation_part": resourceAwsApiGatewayDocumentationPart(), - "aws_api_gateway_documentation_version": resourceAwsApiGatewayDocumentationVersion(), - "aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(), - "aws_api_gateway_gateway_response": resourceAwsApiGatewayGatewayResponse(), - "aws_api_gateway_integration": resourceAwsApiGatewayIntegration(), - "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), - "aws_api_gateway_method": resourceAwsApiGatewayMethod(), - "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), - "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), - "aws_api_gateway_model": resourceAwsApiGatewayModel(), - "aws_api_gateway_request_validator": resourceAwsApiGatewayRequestValidator(), - "aws_api_gateway_resource": resourceAwsApiGatewayResource(), - "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), - "aws_api_gateway_rest_api_policy": resourceAwsApiGatewayRestApiPolicy(), - "aws_api_gateway_stage": resourceAwsApiGatewayStage(), - "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), - "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), - "aws_api_gateway_vpc_link": resourceAwsApiGatewayVpcLink(), - "aws_apigatewayv2_api": resourceAwsApiGatewayV2Api(), - "aws_apigatewayv2_api_mapping": resourceAwsApiGatewayV2ApiMapping(), - "aws_apigatewayv2_authorizer": resourceAwsApiGatewayV2Authorizer(), - "aws_apigatewayv2_deployment": resourceAwsApiGatewayV2Deployment(), - "aws_apigatewayv2_domain_name": resourceAwsApiGatewayV2DomainName(), - "aws_apigatewayv2_integration": resourceAwsApiGatewayV2Integration(), - "aws_apigatewayv2_integration_response": resourceAwsApiGatewayV2IntegrationResponse(), - "aws_apigatewayv2_model": resourceAwsApiGatewayV2Model(), - "aws_apigatewayv2_route": resourceAwsApiGatewayV2Route(), - "aws_apigatewayv2_route_response": resourceAwsApiGatewayV2RouteResponse(), - "aws_apigatewayv2_stage": resourceAwsApiGatewayV2Stage(), - "aws_apigatewayv2_vpc_link": resourceAwsApiGatewayV2VpcLink(), - "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), - "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), - "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), - "aws_appautoscaling_scheduled_action": resourceAwsAppautoscalingScheduledAction(), - "aws_appconfig_application": resourceAwsAppconfigApplication(), - "aws_appconfig_configuration_profile": resourceAwsAppconfigConfigurationProfile(), - "aws_appconfig_deployment": resourceAwsAppconfigDeployment(), - "aws_appconfig_deployment_strategy": resourceAwsAppconfigDeploymentStrategy(), - "aws_appconfig_environment": resourceAwsAppconfigEnvironment(), - "aws_appconfig_hosted_configuration_version": resourceAwsAppconfigHostedConfigurationVersion(), - "aws_appmesh_gateway_route": resourceAwsAppmeshGatewayRoute(), 
- "aws_appmesh_mesh": resourceAwsAppmeshMesh(), - "aws_appmesh_route": resourceAwsAppmeshRoute(), - "aws_appmesh_virtual_gateway": resourceAwsAppmeshVirtualGateway(), - "aws_appmesh_virtual_node": resourceAwsAppmeshVirtualNode(), - "aws_appmesh_virtual_router": resourceAwsAppmeshVirtualRouter(), - "aws_appmesh_virtual_service": resourceAwsAppmeshVirtualService(), - "aws_apprunner_auto_scaling_configuration_version": resourceAwsAppRunnerAutoScalingConfigurationVersion(), - "aws_apprunner_connection": resourceAwsAppRunnerConnection(), - "aws_apprunner_custom_domain_association": resourceAwsAppRunnerCustomDomainAssociation(), - "aws_apprunner_service": resourceAwsAppRunnerService(), - "aws_appsync_api_key": resourceAwsAppsyncApiKey(), - "aws_appsync_datasource": resourceAwsAppsyncDatasource(), - "aws_appsync_function": resourceAwsAppsyncFunction(), - "aws_appsync_graphql_api": resourceAwsAppsyncGraphqlApi(), - "aws_appsync_resolver": resourceAwsAppsyncResolver(), - "aws_athena_database": resourceAwsAthenaDatabase(), - "aws_athena_named_query": resourceAwsAthenaNamedQuery(), - "aws_athena_workgroup": resourceAwsAthenaWorkgroup(), - "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), - "aws_autoscaling_group": resourceAwsAutoscalingGroup(), - "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), - "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), - "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), - "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), - "aws_autoscalingplans_scaling_plan": resourceAwsAutoScalingPlansScalingPlan(), - "aws_backup_global_settings": resourceAwsBackupGlobalSettings(), - "aws_backup_plan": resourceAwsBackupPlan(), - "aws_backup_region_settings": resourceAwsBackupRegionSettings(), - "aws_backup_selection": resourceAwsBackupSelection(), - "aws_backup_vault": resourceAwsBackupVault(), - "aws_backup_vault_notifications": resourceAwsBackupVaultNotifications(), - "aws_backup_vault_policy": resourceAwsBackupVaultPolicy(), - "aws_budgets_budget": resourceAwsBudgetsBudget(), - "aws_budgets_budget_action": resourceAwsBudgetsBudgetAction(), - "aws_cloud9_environment_ec2": resourceAwsCloud9EnvironmentEc2(), - "aws_cloudformation_stack": resourceAwsCloudFormationStack(), - "aws_cloudformation_stack_set": resourceAwsCloudFormationStackSet(), - "aws_cloudformation_stack_set_instance": resourceAwsCloudFormationStackSetInstance(), - "aws_cloudformation_type": resourceAwsCloudFormationType(), - "aws_cloudfront_cache_policy": resourceAwsCloudFrontCachePolicy(), - "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), - "aws_cloudfront_function": resourceAwsCloudFrontFunction(), - "aws_cloudfront_key_group": resourceAwsCloudFrontKeyGroup(), - "aws_cloudfront_monitoring_subscription": resourceAwsCloudFrontMonitoringSubscription(), - "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), - "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), - "aws_cloudfront_public_key": resourceAwsCloudFrontPublicKey(), - "aws_cloudfront_realtime_log_config": resourceAwsCloudFrontRealtimeLogConfig(), - "aws_cloudtrail": resourceAwsCloudTrail(), - "aws_cloudwatch_event_bus": resourceAwsCloudWatchEventBus(), - "aws_cloudwatch_event_bus_policy": resourceAwsCloudWatchEventBusPolicy(), - "aws_cloudwatch_event_permission": resourceAwsCloudWatchEventPermission(), - "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), - "aws_cloudwatch_event_target": 
resourceAwsCloudWatchEventTarget(), - "aws_cloudwatch_event_archive": resourceAwsCloudWatchEventArchive(), - "aws_cloudwatch_event_connection": resourceAwsCloudWatchEventConnection(), - "aws_cloudwatch_event_api_destination": resourceAwsCloudWatchEventApiDestination(), - "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), - "aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(), - "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), - "aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(), - "aws_cloudwatch_log_resource_policy": resourceAwsCloudWatchLogResourcePolicy(), - "aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(), - "aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(), - "aws_config_aggregate_authorization": resourceAwsConfigAggregateAuthorization(), - "aws_config_config_rule": resourceAwsConfigConfigRule(), - "aws_config_configuration_aggregator": resourceAwsConfigConfigurationAggregator(), - "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), - "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), - "aws_config_conformance_pack": resourceAwsConfigConformancePack(), - "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), - "aws_config_organization_conformance_pack": resourceAwsConfigOrganizationConformancePack(), - "aws_config_organization_custom_rule": resourceAwsConfigOrganizationCustomRule(), - "aws_config_organization_managed_rule": resourceAwsConfigOrganizationManagedRule(), - "aws_config_remediation_configuration": resourceAwsConfigRemediationConfiguration(), - "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), - "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), - "aws_cognito_identity_provider": resourceAwsCognitoIdentityProvider(), - "aws_cognito_resource_server": resourceAwsCognitoResourceServer(), - "aws_cognito_user_group": resourceAwsCognitoUserGroup(), - "aws_cognito_user_pool": resourceAwsCognitoUserPool(), - "aws_cognito_user_pool_client": resourceAwsCognitoUserPoolClient(), - "aws_cognito_user_pool_domain": resourceAwsCognitoUserPoolDomain(), - "aws_cognito_user_pool_ui_customization": resourceAwsCognitoUserPoolUICustomization(), - "aws_cloudhsm_v2_cluster": resourceAwsCloudHsmV2Cluster(), - "aws_cloudhsm_v2_hsm": resourceAwsCloudHsmV2Hsm(), - "aws_cloudwatch_composite_alarm": resourceAwsCloudWatchCompositeAlarm(), - "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), - "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), - "aws_cloudwatch_metric_stream": resourceAwsCloudWatchMetricStream(), - "aws_cloudwatch_query_definition": resourceAwsCloudWatchQueryDefinition(), - "aws_codedeploy_app": resourceAwsCodeDeployApp(), - "aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(), - "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), - "aws_codecommit_repository": resourceAwsCodeCommitRepository(), - "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), - "aws_codeartifact_domain": resourceAwsCodeArtifactDomain(), - "aws_codeartifact_domain_permissions_policy": resourceAwsCodeArtifactDomainPermissionsPolicy(), - "aws_codeartifact_repository": resourceAwsCodeArtifactRepository(), - "aws_codeartifact_repository_permissions_policy": resourceAwsCodeArtifactRepositoryPermissionsPolicy(), - "aws_codebuild_project": resourceAwsCodeBuildProject(), - 
"aws_codebuild_report_group": resourceAwsCodeBuildReportGroup(), - "aws_codebuild_source_credential": resourceAwsCodeBuildSourceCredential(), - "aws_codebuild_webhook": resourceAwsCodeBuildWebhook(), - "aws_codepipeline": resourceAwsCodePipeline(), - "aws_codepipeline_webhook": resourceAwsCodePipelineWebhook(), - "aws_codestarconnections_connection": resourceAwsCodeStarConnectionsConnection(), - "aws_codestarconnections_host": resourceAwsCodeStarConnectionsHost(), - "aws_codestarnotifications_notification_rule": resourceAwsCodeStarNotificationsNotificationRule(), - "aws_cur_report_definition": resourceAwsCurReportDefinition(), - "aws_customer_gateway": resourceAwsCustomerGateway(), - "aws_datapipeline_pipeline": resourceAwsDataPipelinePipeline(), - "aws_datasync_agent": resourceAwsDataSyncAgent(), - "aws_datasync_location_efs": resourceAwsDataSyncLocationEfs(), - "aws_datasync_location_fsx_windows_file_system": resourceAwsDataSyncLocationFsxWindowsFileSystem(), - "aws_datasync_location_nfs": resourceAwsDataSyncLocationNfs(), - "aws_datasync_location_s3": resourceAwsDataSyncLocationS3(), - "aws_datasync_location_smb": resourceAwsDataSyncLocationSmb(), - "aws_datasync_task": resourceAwsDataSyncTask(), - "aws_dax_cluster": resourceAwsDaxCluster(), - "aws_dax_parameter_group": resourceAwsDaxParameterGroup(), - "aws_dax_subnet_group": resourceAwsDaxSubnetGroup(), - "aws_db_cluster_snapshot": resourceAwsDbClusterSnapshot(), - "aws_db_event_subscription": resourceAwsDbEventSubscription(), - "aws_db_instance": resourceAwsDbInstance(), - "aws_db_instance_role_association": resourceAwsDbInstanceRoleAssociation(), - "aws_db_option_group": resourceAwsDbOptionGroup(), - "aws_db_parameter_group": resourceAwsDbParameterGroup(), - "aws_db_proxy": resourceAwsDbProxy(), - "aws_db_proxy_default_target_group": resourceAwsDbProxyDefaultTargetGroup(), - "aws_db_proxy_endpoint": resourceAwsDbProxyEndpoint(), - "aws_db_proxy_target": resourceAwsDbProxyTarget(), - "aws_db_security_group": resourceAwsDbSecurityGroup(), - "aws_db_snapshot": resourceAwsDbSnapshot(), - "aws_db_subnet_group": resourceAwsDbSubnetGroup(), - "aws_devicefarm_project": resourceAwsDevicefarmProject(), - "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), - "aws_directory_service_conditional_forwarder": resourceAwsDirectoryServiceConditionalForwarder(), - "aws_directory_service_log_subscription": resourceAwsDirectoryServiceLogSubscription(), - "aws_dlm_lifecycle_policy": resourceAwsDlmLifecyclePolicy(), - "aws_dms_certificate": resourceAwsDmsCertificate(), - "aws_dms_endpoint": resourceAwsDmsEndpoint(), - "aws_dms_event_subscription": resourceAwsDmsEventSubscription(), - "aws_dms_replication_instance": resourceAwsDmsReplicationInstance(), - "aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(), - "aws_dms_replication_task": resourceAwsDmsReplicationTask(), - "aws_docdb_cluster": resourceAwsDocDBCluster(), - "aws_docdb_cluster_instance": resourceAwsDocDBClusterInstance(), - "aws_docdb_cluster_parameter_group": resourceAwsDocDBClusterParameterGroup(), - "aws_docdb_cluster_snapshot": resourceAwsDocDBClusterSnapshot(), - "aws_docdb_subnet_group": resourceAwsDocDBSubnetGroup(), - "aws_dx_bgp_peer": resourceAwsDxBgpPeer(), - "aws_dx_connection": resourceAwsDxConnection(), - "aws_dx_connection_association": resourceAwsDxConnectionAssociation(), - "aws_dx_gateway": resourceAwsDxGateway(), - "aws_dx_gateway_association": resourceAwsDxGatewayAssociation(), - "aws_dx_gateway_association_proposal": 
resourceAwsDxGatewayAssociationProposal(), - "aws_dx_hosted_private_virtual_interface": resourceAwsDxHostedPrivateVirtualInterface(), - "aws_dx_hosted_private_virtual_interface_accepter": resourceAwsDxHostedPrivateVirtualInterfaceAccepter(), - "aws_dx_hosted_public_virtual_interface": resourceAwsDxHostedPublicVirtualInterface(), - "aws_dx_hosted_public_virtual_interface_accepter": resourceAwsDxHostedPublicVirtualInterfaceAccepter(), - "aws_dx_hosted_transit_virtual_interface": resourceAwsDxHostedTransitVirtualInterface(), - "aws_dx_hosted_transit_virtual_interface_accepter": resourceAwsDxHostedTransitVirtualInterfaceAccepter(), - "aws_dx_lag": resourceAwsDxLag(), - "aws_dx_private_virtual_interface": resourceAwsDxPrivateVirtualInterface(), - "aws_dx_public_virtual_interface": resourceAwsDxPublicVirtualInterface(), - "aws_dx_transit_virtual_interface": resourceAwsDxTransitVirtualInterface(), - "aws_dynamodb_table": resourceAwsDynamoDbTable(), - "aws_dynamodb_table_item": resourceAwsDynamoDbTableItem(), - "aws_dynamodb_global_table": resourceAwsDynamoDbGlobalTable(), - "aws_dynamodb_kinesis_streaming_destination": resourceAwsDynamoDbKinesisStreamingDestination(), - "aws_ebs_default_kms_key": resourceAwsEbsDefaultKmsKey(), - "aws_ebs_encryption_by_default": resourceAwsEbsEncryptionByDefault(), - "aws_ebs_snapshot": resourceAwsEbsSnapshot(), - "aws_ebs_snapshot_copy": resourceAwsEbsSnapshotCopy(), - "aws_ebs_snapshot_import": resourceAwsEbsSnapshotImport(), - "aws_ebs_volume": resourceAwsEbsVolume(), - "aws_ec2_availability_zone_group": resourceAwsEc2AvailabilityZoneGroup(), - "aws_ec2_capacity_reservation": resourceAwsEc2CapacityReservation(), - "aws_ec2_carrier_gateway": resourceAwsEc2CarrierGateway(), - "aws_ec2_client_vpn_authorization_rule": resourceAwsEc2ClientVpnAuthorizationRule(), - "aws_ec2_client_vpn_endpoint": resourceAwsEc2ClientVpnEndpoint(), - "aws_ec2_client_vpn_network_association": resourceAwsEc2ClientVpnNetworkAssociation(), - "aws_ec2_client_vpn_route": resourceAwsEc2ClientVpnRoute(), - "aws_ec2_fleet": resourceAwsEc2Fleet(), - "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), - "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), - "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), - "aws_ec2_tag": resourceAwsEc2Tag(), - "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), - "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), - "aws_ec2_traffic_mirror_target": resourceAwsEc2TrafficMirrorTarget(), - "aws_ec2_traffic_mirror_session": resourceAwsEc2TrafficMirrorSession(), - "aws_ec2_transit_gateway": resourceAwsEc2TransitGateway(), - "aws_ec2_transit_gateway_peering_attachment": resourceAwsEc2TransitGatewayPeeringAttachment(), - "aws_ec2_transit_gateway_peering_attachment_accepter": resourceAwsEc2TransitGatewayPeeringAttachmentAccepter(), - "aws_ec2_transit_gateway_prefix_list_reference": resourceAwsEc2TransitGatewayPrefixListReference(), - "aws_ec2_transit_gateway_route": resourceAwsEc2TransitGatewayRoute(), - "aws_ec2_transit_gateway_route_table": resourceAwsEc2TransitGatewayRouteTable(), - "aws_ec2_transit_gateway_route_table_association": resourceAwsEc2TransitGatewayRouteTableAssociation(), - "aws_ec2_transit_gateway_route_table_propagation": resourceAwsEc2TransitGatewayRouteTablePropagation(), - "aws_ec2_transit_gateway_vpc_attachment": resourceAwsEc2TransitGatewayVpcAttachment(), - "aws_ec2_transit_gateway_vpc_attachment_accepter": 
resourceAwsEc2TransitGatewayVpcAttachmentAccepter(), - "aws_ecr_lifecycle_policy": resourceAwsEcrLifecyclePolicy(), - "aws_ecrpublic_repository": resourceAwsEcrPublicRepository(), - "aws_ecr_registry_policy": resourceAwsEcrRegistryPolicy(), - "aws_ecr_replication_configuration": resourceAwsEcrReplicationConfiguration(), - "aws_ecr_repository": resourceAwsEcrRepository(), - "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), - "aws_ecs_capacity_provider": resourceAwsEcsCapacityProvider(), - "aws_ecs_cluster": resourceAwsEcsCluster(), - "aws_ecs_service": resourceAwsEcsService(), - "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), - "aws_efs_access_point": resourceAwsEfsAccessPoint(), - "aws_efs_backup_policy": resourceAwsEfsBackupPolicy(), - "aws_efs_file_system": resourceAwsEfsFileSystem(), - "aws_efs_file_system_policy": resourceAwsEfsFileSystemPolicy(), - "aws_efs_mount_target": resourceAwsEfsMountTarget(), - "aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(), - "aws_eip": resourceAwsEip(), - "aws_eip_association": resourceAwsEipAssociation(), - "aws_eks_cluster": resourceAwsEksCluster(), - "aws_eks_addon": resourceAwsEksAddon(), - "aws_eks_fargate_profile": resourceAwsEksFargateProfile(), - "aws_eks_identity_provider_config": resourceAwsEksIdentityProviderConfig(), - "aws_eks_node_group": resourceAwsEksNodeGroup(), - "aws_elasticache_cluster": resourceAwsElasticacheCluster(), - "aws_elasticache_global_replication_group": resourceAwsElasticacheGlobalReplicationGroup(), - "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), - "aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(), - "aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), - "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), - "aws_elasticache_user": resourceAwsElasticacheUser(), - "aws_elasticache_user_group": resourceAwsElasticacheUserGroup(), - "aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(), - "aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(), - "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), - "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), - "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), - "aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(), - "aws_elasticsearch_domain_saml_options": resourceAwsElasticSearchDomainSAMLOptions(), - "aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), - "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), - "aws_elb": resourceAwsElb(), - "aws_elb_attachment": resourceAwsElbAttachment(), - "aws_emr_cluster": resourceAwsEMRCluster(), - "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), - "aws_emr_instance_fleet": resourceAwsEMRInstanceFleet(), - "aws_emr_managed_scaling_policy": resourceAwsEMRManagedScalingPolicy(), - "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), - "aws_flow_log": resourceAwsFlowLog(), - "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), - "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), - "aws_fms_admin_account": resourceAwsFmsAdminAccount(), - "aws_fms_policy": resourceAwsFmsPolicy(), - "aws_gamelift_alias": resourceAwsGameliftAlias(), - "aws_gamelift_build": resourceAwsGameliftBuild(), - "aws_gamelift_fleet": resourceAwsGameliftFleet(), - 
"aws_gamelift_game_session_queue": resourceAwsGameliftGameSessionQueue(), - "aws_glacier_vault": resourceAwsGlacierVault(), - "aws_glacier_vault_lock": resourceAwsGlacierVaultLock(), - "aws_globalaccelerator_accelerator": resourceAwsGlobalAcceleratorAccelerator(), - "aws_globalaccelerator_endpoint_group": resourceAwsGlobalAcceleratorEndpointGroup(), - "aws_globalaccelerator_listener": resourceAwsGlobalAcceleratorListener(), - "aws_glue_catalog_database": resourceAwsGlueCatalogDatabase(), - "aws_glue_catalog_table": resourceAwsGlueCatalogTable(), - "aws_glue_classifier": resourceAwsGlueClassifier(), - "aws_glue_connection": resourceAwsGlueConnection(), - "aws_glue_dev_endpoint": resourceAwsGlueDevEndpoint(), - "aws_glue_crawler": resourceAwsGlueCrawler(), - "aws_glue_data_catalog_encryption_settings": resourceAwsGlueDataCatalogEncryptionSettings(), - "aws_glue_job": resourceAwsGlueJob(), - "aws_glue_ml_transform": resourceAwsGlueMLTransform(), - "aws_glue_partition": resourceAwsGluePartition(), - "aws_glue_registry": resourceAwsGlueRegistry(), - "aws_glue_resource_policy": resourceAwsGlueResourcePolicy(), - "aws_glue_schema": resourceAwsGlueSchema(), - "aws_glue_security_configuration": resourceAwsGlueSecurityConfiguration(), - "aws_glue_trigger": resourceAwsGlueTrigger(), - "aws_glue_user_defined_function": resourceAwsGlueUserDefinedFunction(), - "aws_glue_workflow": resourceAwsGlueWorkflow(), - "aws_guardduty_detector": resourceAwsGuardDutyDetector(), - "aws_guardduty_filter": resourceAwsGuardDutyFilter(), - "aws_guardduty_invite_accepter": resourceAwsGuardDutyInviteAccepter(), - "aws_guardduty_ipset": resourceAwsGuardDutyIpset(), - "aws_guardduty_member": resourceAwsGuardDutyMember(), - "aws_guardduty_organization_admin_account": resourceAwsGuardDutyOrganizationAdminAccount(), - "aws_guardduty_organization_configuration": resourceAwsGuardDutyOrganizationConfiguration(), - "aws_guardduty_publishing_destination": resourceAwsGuardDutyPublishingDestination(), - "aws_guardduty_threatintelset": resourceAwsGuardDutyThreatintelset(), - "aws_iam_access_key": resourceAwsIamAccessKey(), - "aws_iam_account_alias": resourceAwsIamAccountAlias(), - "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), - "aws_iam_group_policy": resourceAwsIamGroupPolicy(), - "aws_iam_group": resourceAwsIamGroup(), - "aws_iam_group_membership": resourceAwsIamGroupMembership(), - "aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(), - "aws_iam_instance_profile": resourceAwsIamInstanceProfile(), - "aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(), - "aws_iam_policy": resourceAwsIamPolicy(), - "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), - "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), - "aws_iam_role_policy": resourceAwsIamRolePolicy(), - "aws_iam_role": resourceAwsIamRole(), - "aws_iam_saml_provider": resourceAwsIamSamlProvider(), - "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), - "aws_iam_service_linked_role": resourceAwsIamServiceLinkedRole(), - "aws_iam_user_group_membership": resourceAwsIamUserGroupMembership(), - "aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(), - "aws_iam_user_policy": resourceAwsIamUserPolicy(), - "aws_iam_user_ssh_key": resourceAwsIamUserSshKey(), - "aws_iam_user": resourceAwsIamUser(), - "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), - "aws_imagebuilder_component": resourceAwsImageBuilderComponent(), - 
"aws_imagebuilder_distribution_configuration": resourceAwsImageBuilderDistributionConfiguration(), - "aws_imagebuilder_image": resourceAwsImageBuilderImage(), - "aws_imagebuilder_image_pipeline": resourceAwsImageBuilderImagePipeline(), - "aws_imagebuilder_image_recipe": resourceAwsImageBuilderImageRecipe(), - "aws_imagebuilder_infrastructure_configuration": resourceAwsImageBuilderInfrastructureConfiguration(), - "aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(), - "aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(), - "aws_inspector_resource_group": resourceAWSInspectorResourceGroup(), - "aws_instance": resourceAwsInstance(), - "aws_internet_gateway": resourceAwsInternetGateway(), - "aws_iot_certificate": resourceAwsIotCertificate(), - "aws_iot_policy": resourceAwsIotPolicy(), - "aws_iot_policy_attachment": resourceAwsIotPolicyAttachment(), - "aws_iot_thing": resourceAwsIotThing(), - "aws_iot_thing_principal_attachment": resourceAwsIotThingPrincipalAttachment(), - "aws_iot_thing_type": resourceAwsIotThingType(), - "aws_iot_topic_rule": resourceAwsIotTopicRule(), - "aws_iot_role_alias": resourceAwsIotRoleAlias(), - "aws_key_pair": resourceAwsKeyPair(), - "aws_kinesis_analytics_application": resourceAwsKinesisAnalyticsApplication(), - "aws_kinesisanalyticsv2_application": resourceAwsKinesisAnalyticsV2Application(), - "aws_kinesisanalyticsv2_application_snapshot": resourceAwsKinesisAnalyticsV2ApplicationSnapshot(), - "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), - "aws_kinesis_stream": resourceAwsKinesisStream(), - "aws_kinesis_stream_consumer": resourceAwsKinesisStreamConsumer(), - "aws_kinesis_video_stream": resourceAwsKinesisVideoStream(), - "aws_kms_alias": resourceAwsKmsAlias(), - "aws_kms_external_key": resourceAwsKmsExternalKey(), - "aws_kms_grant": resourceAwsKmsGrant(), - "aws_kms_key": resourceAwsKmsKey(), - "aws_kms_ciphertext": resourceAwsKmsCiphertext(), - "aws_lakeformation_data_lake_settings": resourceAwsLakeFormationDataLakeSettings(), - "aws_lakeformation_permissions": resourceAwsLakeFormationPermissions(), - "aws_lakeformation_resource": resourceAwsLakeFormationResource(), - "aws_lambda_alias": resourceAwsLambdaAlias(), - "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), - "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), - "aws_lambda_function_event_invoke_config": resourceAwsLambdaFunctionEventInvokeConfig(), - "aws_lambda_function": resourceAwsLambdaFunction(), - "aws_lambda_layer_version": resourceAwsLambdaLayerVersion(), - "aws_lambda_permission": resourceAwsLambdaPermission(), - "aws_lambda_provisioned_concurrency_config": resourceAwsLambdaProvisionedConcurrencyConfig(), - "aws_launch_configuration": resourceAwsLaunchConfiguration(), - "aws_launch_template": resourceAwsLaunchTemplate(), - "aws_lex_bot": resourceAwsLexBot(), - "aws_lex_bot_alias": resourceAwsLexBotAlias(), - "aws_lex_intent": resourceAwsLexIntent(), - "aws_lex_slot_type": resourceAwsLexSlotType(), - "aws_licensemanager_association": resourceAwsLicenseManagerAssociation(), - "aws_licensemanager_license_configuration": resourceAwsLicenseManagerLicenseConfiguration(), - "aws_lightsail_domain": resourceAwsLightsailDomain(), - "aws_lightsail_instance": resourceAwsLightsailInstance(), - "aws_lightsail_instance_public_ports": resourceAwsLightsailInstancePublicPorts(), - "aws_lightsail_key_pair": resourceAwsLightsailKeyPair(), - "aws_lightsail_static_ip": 
resourceAwsLightsailStaticIp(), - "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(), - "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), - "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), - "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), - "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), - "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), - "aws_macie2_account": resourceAwsMacie2Account(), - "aws_macie2_classification_job": resourceAwsMacie2ClassificationJob(), - "aws_macie2_custom_data_identifier": resourceAwsMacie2CustomDataIdentifier(), - "aws_macie2_findings_filter": resourceAwsMacie2FindingsFilter(), - "aws_macie2_invitation_accepter": resourceAwsMacie2InvitationAccepter(), - "aws_macie2_member": resourceAwsMacie2Member(), - "aws_macie2_organization_admin_account": resourceAwsMacie2OrganizationAdminAccount(), - "aws_macie_member_account_association": resourceAwsMacieMemberAccountAssociation(), - "aws_macie_s3_bucket_association": resourceAwsMacieS3BucketAssociation(), - "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), - "aws_mq_broker": resourceAwsMqBroker(), - "aws_mq_configuration": resourceAwsMqConfiguration(), - "aws_media_convert_queue": resourceAwsMediaConvertQueue(), - "aws_media_package_channel": resourceAwsMediaPackageChannel(), - "aws_media_store_container": resourceAwsMediaStoreContainer(), - "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), - "aws_msk_cluster": resourceAwsMskCluster(), - "aws_msk_configuration": resourceAwsMskConfiguration(), - "aws_msk_scram_secret_association": resourceAwsMskScramSecretAssociation(), - "aws_mwaa_environment": resourceAwsMwaaEnvironment(), - "aws_nat_gateway": resourceAwsNatGateway(), - "aws_network_acl": resourceAwsNetworkAcl(), - "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), - "aws_neptune_cluster": resourceAwsNeptuneCluster(), - "aws_neptune_cluster_endpoint": resourceAwsNeptuneClusterEndpoint(), - "aws_neptune_cluster_instance": resourceAwsNeptuneClusterInstance(), - "aws_neptune_cluster_parameter_group": resourceAwsNeptuneClusterParameterGroup(), - "aws_neptune_cluster_snapshot": resourceAwsNeptuneClusterSnapshot(), - "aws_neptune_event_subscription": resourceAwsNeptuneEventSubscription(), - "aws_neptune_parameter_group": resourceAwsNeptuneParameterGroup(), - "aws_neptune_subnet_group": resourceAwsNeptuneSubnetGroup(), - "aws_network_acl_rule": resourceAwsNetworkAclRule(), - "aws_network_interface": resourceAwsNetworkInterface(), - "aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(), - "aws_networkfirewall_firewall": resourceAwsNetworkFirewallFirewall(), - "aws_networkfirewall_firewall_policy": resourceAwsNetworkFirewallFirewallPolicy(), - "aws_networkfirewall_logging_configuration": resourceAwsNetworkFirewallLoggingConfiguration(), - "aws_networkfirewall_resource_policy": resourceAwsNetworkFirewallResourcePolicy(), - "aws_networkfirewall_rule_group": resourceAwsNetworkFirewallRuleGroup(), - "aws_opsworks_application": resourceAwsOpsworksApplication(), - "aws_opsworks_stack": resourceAwsOpsworksStack(), - "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), - "aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(), - "aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(), - "aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(), - 
"aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(), - "aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(), - "aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(), - "aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(), - "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), - "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), - "aws_opsworks_instance": resourceAwsOpsworksInstance(), - "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), - "aws_opsworks_permission": resourceAwsOpsworksPermission(), - "aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(), - "aws_organizations_organization": resourceAwsOrganizationsOrganization(), - "aws_organizations_account": resourceAwsOrganizationsAccount(), - "aws_organizations_delegated_administrator": resourceAwsOrganizationsDelegatedAdministrator(), - "aws_organizations_policy": resourceAwsOrganizationsPolicy(), - "aws_organizations_policy_attachment": resourceAwsOrganizationsPolicyAttachment(), - "aws_organizations_organizational_unit": resourceAwsOrganizationsOrganizationalUnit(), - "aws_placement_group": resourceAwsPlacementGroup(), - "aws_prometheus_workspace": resourceAwsPrometheusWorkspace(), - "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), - "aws_qldb_ledger": resourceAwsQLDBLedger(), - "aws_quicksight_group": resourceAwsQuickSightGroup(), - "aws_quicksight_user": resourceAwsQuickSightUser(), - "aws_ram_principal_association": resourceAwsRamPrincipalAssociation(), - "aws_ram_resource_association": resourceAwsRamResourceAssociation(), - "aws_ram_resource_share": resourceAwsRamResourceShare(), - "aws_ram_resource_share_accepter": resourceAwsRamResourceShareAccepter(), - "aws_rds_cluster": resourceAwsRDSCluster(), - "aws_rds_cluster_endpoint": resourceAwsRDSClusterEndpoint(), - "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), - "aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(), - "aws_rds_cluster_role_association": resourceAwsRDSClusterRoleAssociation(), - "aws_rds_global_cluster": resourceAwsRDSGlobalCluster(), - "aws_redshift_cluster": resourceAwsRedshiftCluster(), - "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(), - "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), - "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), - "aws_redshift_snapshot_copy_grant": resourceAwsRedshiftSnapshotCopyGrant(), - "aws_redshift_snapshot_schedule": resourceAwsRedshiftSnapshotSchedule(), - "aws_redshift_snapshot_schedule_association": resourceAwsRedshiftSnapshotScheduleAssociation(), - "aws_redshift_event_subscription": resourceAwsRedshiftEventSubscription(), - "aws_resourcegroups_group": resourceAwsResourceGroupsGroup(), - "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), - "aws_route53_hosted_zone_dnssec": resourceAwsRoute53HostedZoneDnssec(), - "aws_route53_key_signing_key": resourceAwsRoute53KeySigningKey(), - "aws_route53_query_log": resourceAwsRoute53QueryLog(), - "aws_route53_record": resourceAwsRoute53Record(), - "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), - "aws_route53_vpc_association_authorization": resourceAwsRoute53VPCAssociationAuthorization(), - "aws_route53_zone": resourceAwsRoute53Zone(), - "aws_route53_health_check": resourceAwsRoute53HealthCheck(), - "aws_route53_resolver_dnssec_config": resourceAwsRoute53ResolverDnssecConfig(), - "aws_route53_resolver_endpoint": resourceAwsRoute53ResolverEndpoint(), - 
"aws_route53_resolver_firewall_config": resourceAwsRoute53ResolverFirewallConfig(), - "aws_route53_resolver_firewall_domain_list": resourceAwsRoute53ResolverFirewallDomainList(), - "aws_route53_resolver_firewall_rule": resourceAwsRoute53ResolverFirewallRule(), - "aws_route53_resolver_firewall_rule_group": resourceAwsRoute53ResolverFirewallRuleGroup(), - "aws_route53_resolver_firewall_rule_group_association": resourceAwsRoute53ResolverFirewallRuleGroupAssociation(), - "aws_route53_resolver_query_log_config": resourceAwsRoute53ResolverQueryLogConfig(), - "aws_route53_resolver_query_log_config_association": resourceAwsRoute53ResolverQueryLogConfigAssociation(), - "aws_route53_resolver_rule_association": resourceAwsRoute53ResolverRuleAssociation(), - "aws_route53_resolver_rule": resourceAwsRoute53ResolverRule(), - "aws_route": resourceAwsRoute(), - "aws_route_table": resourceAwsRouteTable(), - "aws_default_route_table": resourceAwsDefaultRouteTable(), - "aws_route_table_association": resourceAwsRouteTableAssociation(), - "aws_sagemaker_app": resourceAwsSagemakerApp(), - "aws_sagemaker_app_image_config": resourceAwsSagemakerAppImageConfig(), - "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), - "aws_sagemaker_domain": resourceAwsSagemakerDomain(), - "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), - "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), - "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), - "aws_sagemaker_image": resourceAwsSagemakerImage(), - "aws_sagemaker_image_version": resourceAwsSagemakerImageVersion(), - "aws_sagemaker_model": resourceAwsSagemakerModel(), - "aws_sagemaker_model_package_group": resourceAwsSagemakerModelPackageGroup(), - "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), - "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), - "aws_sagemaker_user_profile": resourceAwsSagemakerUserProfile(), - "aws_sagemaker_workforce": resourceAwsSagemakerWorkforce(), - "aws_sagemaker_workteam": resourceAwsSagemakerWorkteam(), - "aws_schemas_discoverer": resourceAwsSchemasDiscoverer(), - "aws_schemas_registry": resourceAwsSchemasRegistry(), - "aws_schemas_schema": resourceAwsSchemasSchema(), - "aws_secretsmanager_secret": resourceAwsSecretsManagerSecret(), - "aws_secretsmanager_secret_policy": resourceAwsSecretsManagerSecretPolicy(), - "aws_secretsmanager_secret_version": resourceAwsSecretsManagerSecretVersion(), - "aws_secretsmanager_secret_rotation": resourceAwsSecretsManagerSecretRotation(), - "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), - "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), - "aws_ses_domain_identity_verification": resourceAwsSesDomainIdentityVerification(), - "aws_ses_domain_dkim": resourceAwsSesDomainDkim(), - "aws_ses_domain_mail_from": resourceAwsSesDomainMailFrom(), - "aws_ses_email_identity": resourceAwsSesEmailIdentity(), - "aws_ses_identity_policy": resourceAwsSesIdentityPolicy(), - "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), - "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), - "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), - "aws_ses_configuration_set": resourceAwsSesConfigurationSet(), - "aws_ses_event_destination": resourceAwsSesEventDestination(), - "aws_ses_identity_notification_topic": resourceAwsSesNotificationTopic(), - "aws_ses_template": resourceAwsSesTemplate(), - "aws_s3_access_point": 
resourceAwsS3AccessPoint(), - "aws_s3_account_public_access_block": resourceAwsS3AccountPublicAccessBlock(), - "aws_s3_bucket": resourceAwsS3Bucket(), - "aws_s3_bucket_analytics_configuration": resourceAwsS3BucketAnalyticsConfiguration(), - "aws_s3_bucket_policy": resourceAwsS3BucketPolicy(), - "aws_s3_bucket_public_access_block": resourceAwsS3BucketPublicAccessBlock(), - "aws_s3_bucket_object": resourceAwsS3BucketObject(), - "aws_s3_bucket_ownership_controls": resourceAwsS3BucketOwnershipControls(), - "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), - "aws_s3_bucket_metric": resourceAwsS3BucketMetric(), - "aws_s3_bucket_inventory": resourceAwsS3BucketInventory(), - "aws_s3_bucket_replication_configuration": resourceAwsS3BucketReplicationConfiguration(), - "aws_s3_object_copy": resourceAwsS3ObjectCopy(), - "aws_s3control_bucket": resourceAwsS3ControlBucket(), - "aws_s3control_bucket_policy": resourceAwsS3ControlBucketPolicy(), - "aws_s3control_bucket_lifecycle_configuration": resourceAwsS3ControlBucketLifecycleConfiguration(), - "aws_s3outposts_endpoint": resourceAwsS3OutpostsEndpoint(), - "aws_security_group": resourceAwsSecurityGroup(), - "aws_network_interface_sg_attachment": resourceAwsNetworkInterfaceSGAttachment(), - "aws_default_security_group": resourceAwsDefaultSecurityGroup(), - "aws_security_group_rule": resourceAwsSecurityGroupRule(), - "aws_securityhub_account": resourceAwsSecurityHubAccount(), - "aws_securityhub_action_target": resourceAwsSecurityHubActionTarget(), - "aws_securityhub_insight": resourceAwsSecurityHubInsight(), - "aws_securityhub_invite_accepter": resourceAwsSecurityHubInviteAccepter(), - "aws_securityhub_member": resourceAwsSecurityHubMember(), - "aws_securityhub_organization_admin_account": resourceAwsSecurityHubOrganizationAdminAccount(), - "aws_securityhub_organization_configuration": resourceAwsSecurityHubOrganizationConfiguration(), - "aws_securityhub_product_subscription": resourceAwsSecurityHubProductSubscription(), - "aws_securityhub_standards_control": resourceAwsSecurityHubStandardsControl(), - "aws_securityhub_standards_subscription": resourceAwsSecurityHubStandardsSubscription(), - "aws_servicecatalog_budget_resource_association": resourceAwsServiceCatalogBudgetResourceAssociation(), - "aws_servicecatalog_constraint": resourceAwsServiceCatalogConstraint(), - "aws_servicecatalog_organizations_access": resourceAwsServiceCatalogOrganizationsAccess(), - "aws_servicecatalog_portfolio": resourceAwsServiceCatalogPortfolio(), - "aws_servicecatalog_portfolio_share": resourceAwsServiceCatalogPortfolioShare(), - "aws_servicecatalog_product": resourceAwsServiceCatalogProduct(), - "aws_servicecatalog_provisioned_product": resourceAwsServiceCatalogProvisionedProduct(), - "aws_servicecatalog_service_action": resourceAwsServiceCatalogServiceAction(), - "aws_servicecatalog_tag_option": resourceAwsServiceCatalogTagOption(), - "aws_servicecatalog_tag_option_resource_association": resourceAwsServiceCatalogTagOptionResourceAssociation(), - "aws_servicecatalog_principal_portfolio_association": resourceAwsServiceCatalogPrincipalPortfolioAssociation(), - "aws_servicecatalog_product_portfolio_association": resourceAwsServiceCatalogProductPortfolioAssociation(), - "aws_servicecatalog_provisioning_artifact": resourceAwsServiceCatalogProvisioningArtifact(), - "aws_service_discovery_http_namespace": resourceAwsServiceDiscoveryHttpNamespace(), - "aws_service_discovery_private_dns_namespace": resourceAwsServiceDiscoveryPrivateDnsNamespace(), - 
"aws_service_discovery_public_dns_namespace": resourceAwsServiceDiscoveryPublicDnsNamespace(), - "aws_service_discovery_service": resourceAwsServiceDiscoveryService(), - "aws_servicequotas_service_quota": resourceAwsServiceQuotasServiceQuota(), - "aws_shield_protection": resourceAwsShieldProtection(), - "aws_signer_signing_job": resourceAwsSignerSigningJob(), - "aws_signer_signing_profile": resourceAwsSignerSigningProfile(), - "aws_signer_signing_profile_permission": resourceAwsSignerSigningProfilePermission(), - "aws_simpledb_domain": resourceAwsSimpleDBDomain(), - "aws_ssm_activation": resourceAwsSsmActivation(), - "aws_ssm_association": resourceAwsSsmAssociation(), - "aws_ssm_document": resourceAwsSsmDocument(), - "aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(), - "aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(), - "aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(), - "aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(), - "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), - "aws_ssm_parameter": resourceAwsSsmParameter(), - "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), - "aws_ssoadmin_account_assignment": resourceAwsSsoAdminAccountAssignment(), - "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), - "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), - "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), - "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), - "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), - "aws_storagegateway_file_system_association": resourceAwsStorageGatewayFileSystemAssociation(), - "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), - "aws_storagegateway_nfs_file_share": resourceAwsStorageGatewayNfsFileShare(), - "aws_storagegateway_smb_file_share": resourceAwsStorageGatewaySmbFileShare(), - "aws_storagegateway_stored_iscsi_volume": resourceAwsStorageGatewayStoredIscsiVolume(), - "aws_storagegateway_tape_pool": resourceAwsStorageGatewayTapePool(), - "aws_storagegateway_upload_buffer": resourceAwsStorageGatewayUploadBuffer(), - "aws_storagegateway_working_storage": resourceAwsStorageGatewayWorkingStorage(), - "aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(), - "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), - "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), - "aws_sqs_queue": resourceAwsSqsQueue(), - "aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(), - "aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(), - "aws_sns_platform_application": resourceAwsSnsPlatformApplication(), - "aws_sns_sms_preferences": resourceAwsSnsSmsPreferences(), - "aws_sns_topic": resourceAwsSnsTopic(), - "aws_sns_topic_policy": resourceAwsSnsTopicPolicy(), - "aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(), - "aws_sfn_activity": resourceAwsSfnActivity(), - "aws_sfn_state_machine": resourceAwsSfnStateMachine(), - "aws_default_subnet": resourceAwsDefaultSubnet(), - "aws_subnet": resourceAwsSubnet(), - "aws_swf_domain": resourceAwsSwfDomain(), - "aws_synthetics_canary": resourceAwsSyntheticsCanary(), - "aws_timestreamwrite_database": resourceAwsTimestreamWriteDatabase(), - "aws_timestreamwrite_table": resourceAwsTimestreamWriteTable(), - "aws_transfer_server": resourceAwsTransferServer(), - "aws_transfer_ssh_key": resourceAwsTransferSshKey(), - 
"aws_transfer_user": resourceAwsTransferUser(), - "aws_volume_attachment": resourceAwsVolumeAttachment(), - "aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(), - "aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(), - "aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(), - "aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(), - "aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(), - "aws_vpc_peering_connection_options": resourceAwsVpcPeeringConnectionOptions(), - "aws_default_vpc": resourceAwsDefaultVpc(), - "aws_vpc": resourceAwsVpc(), - "aws_vpc_endpoint": resourceAwsVpcEndpoint(), - "aws_vpc_endpoint_connection_notification": resourceAwsVpcEndpointConnectionNotification(), - "aws_vpc_endpoint_route_table_association": resourceAwsVpcEndpointRouteTableAssociation(), - "aws_vpc_endpoint_subnet_association": resourceAwsVpcEndpointSubnetAssociation(), - "aws_vpc_endpoint_service": resourceAwsVpcEndpointService(), - "aws_vpc_endpoint_service_allowed_principal": resourceAwsVpcEndpointServiceAllowedPrincipal(), - "aws_vpc_ipv4_cidr_block_association": resourceAwsVpcIpv4CidrBlockAssociation(), - "aws_vpn_connection": resourceAwsVpnConnection(), - "aws_vpn_connection_route": resourceAwsVpnConnectionRoute(), - "aws_vpn_gateway": resourceAwsVpnGateway(), - "aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(), - "aws_vpn_gateway_route_propagation": resourceAwsVpnGatewayRoutePropagation(), - "aws_waf_byte_match_set": resourceAwsWafByteMatchSet(), - "aws_waf_ipset": resourceAwsWafIPSet(), - "aws_waf_rate_based_rule": resourceAwsWafRateBasedRule(), - "aws_waf_regex_match_set": resourceAwsWafRegexMatchSet(), - "aws_waf_regex_pattern_set": resourceAwsWafRegexPatternSet(), - "aws_waf_rule": resourceAwsWafRule(), - "aws_waf_rule_group": resourceAwsWafRuleGroup(), - "aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(), - "aws_waf_web_acl": resourceAwsWafWebAcl(), - "aws_waf_xss_match_set": resourceAwsWafXssMatchSet(), - "aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(), - "aws_waf_geo_match_set": resourceAwsWafGeoMatchSet(), - "aws_wafregional_byte_match_set": resourceAwsWafRegionalByteMatchSet(), - "aws_wafregional_geo_match_set": resourceAwsWafRegionalGeoMatchSet(), - "aws_wafregional_ipset": resourceAwsWafRegionalIPSet(), - "aws_wafregional_rate_based_rule": resourceAwsWafRegionalRateBasedRule(), - "aws_wafregional_regex_match_set": resourceAwsWafRegionalRegexMatchSet(), - "aws_wafregional_regex_pattern_set": resourceAwsWafRegionalRegexPatternSet(), - "aws_wafregional_rule": resourceAwsWafRegionalRule(), - "aws_wafregional_rule_group": resourceAwsWafRegionalRuleGroup(), - "aws_wafregional_size_constraint_set": resourceAwsWafRegionalSizeConstraintSet(), - "aws_wafregional_sql_injection_match_set": resourceAwsWafRegionalSqlInjectionMatchSet(), - "aws_wafregional_xss_match_set": resourceAwsWafRegionalXssMatchSet(), - "aws_wafregional_web_acl": resourceAwsWafRegionalWebAcl(), - "aws_wafregional_web_acl_association": resourceAwsWafRegionalWebAclAssociation(), - "aws_wafv2_ip_set": resourceAwsWafv2IPSet(), - "aws_wafv2_regex_pattern_set": resourceAwsWafv2RegexPatternSet(), - "aws_wafv2_rule_group": resourceAwsWafv2RuleGroup(), - "aws_wafv2_web_acl": resourceAwsWafv2WebACL(), - "aws_wafv2_web_acl_association": resourceAwsWafv2WebACLAssociation(), - "aws_wafv2_web_acl_logging_configuration": resourceAwsWafv2WebACLLoggingConfiguration(), - "aws_worklink_fleet": resourceAwsWorkLinkFleet(), - 
"aws_worklink_website_certificate_authority_association": resourceAwsWorkLinkWebsiteCertificateAuthorityAssociation(), - "aws_workspaces_directory": resourceAwsWorkspacesDirectory(), - "aws_workspaces_workspace": resourceAwsWorkspacesWorkspace(), - "aws_batch_compute_environment": resourceAwsBatchComputeEnvironment(), - "aws_batch_job_definition": resourceAwsBatchJobDefinition(), - "aws_batch_job_queue": resourceAwsBatchJobQueue(), - "aws_pinpoint_app": resourceAwsPinpointApp(), - "aws_pinpoint_adm_channel": resourceAwsPinpointADMChannel(), - "aws_pinpoint_apns_channel": resourceAwsPinpointAPNSChannel(), - "aws_pinpoint_apns_sandbox_channel": resourceAwsPinpointAPNSSandboxChannel(), - "aws_pinpoint_apns_voip_channel": resourceAwsPinpointAPNSVoipChannel(), - "aws_pinpoint_apns_voip_sandbox_channel": resourceAwsPinpointAPNSVoipSandboxChannel(), - "aws_pinpoint_baidu_channel": resourceAwsPinpointBaiduChannel(), - "aws_pinpoint_email_channel": resourceAwsPinpointEmailChannel(), - "aws_pinpoint_event_stream": resourceAwsPinpointEventStream(), - "aws_pinpoint_gcm_channel": resourceAwsPinpointGCMChannel(), - "aws_pinpoint_sms_channel": resourceAwsPinpointSMSChannel(), - "aws_xray_encryption_config": resourceAwsXrayEncryptionConfig(), - "aws_xray_group": resourceAwsXrayGroup(), - "aws_xray_sampling_rule": resourceAwsXraySamplingRule(), - "aws_workspaces_ip_group": resourceAwsWorkspacesIpGroup(), - - // ALBs are actually LBs because they can be type `network` or `application` - // To avoid regressions, we will add a new resource for each and they both point - // back to the old ALB version. IF the Terraform supported aliases for resources - // this would be a whole lot simpler - "aws_alb": resourceAwsLb(), - "aws_lb": resourceAwsLb(), - "aws_alb_listener": resourceAwsLbListener(), - "aws_lb_listener": resourceAwsLbListener(), - "aws_alb_listener_certificate": resourceAwsLbListenerCertificate(), - "aws_lb_listener_certificate": resourceAwsLbListenerCertificate(), - "aws_alb_listener_rule": resourceAwsLbbListenerRule(), - "aws_lb_listener_rule": resourceAwsLbbListenerRule(), - "aws_alb_target_group": resourceAwsLbTargetGroup(), - "aws_lb_target_group": resourceAwsLbTargetGroup(), - "aws_alb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), - "aws_lb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), - }, - } - - // Avoid Go formatting churn and Git conflicts - // You probably should not do this - provider.DataSourcesMap["aws_serverlessapplicationrepository_application"] = dataSourceAwsServerlessApplicationRepositoryApplication() - provider.ResourcesMap["aws_serverlessapplicationrepository_cloudformation_stack"] = resourceAwsServerlessApplicationRepositoryCloudFormationStack() - - provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { - terraformVersion := provider.TerraformVersion - if terraformVersion == "" { - // Terraform 0.12 introduced this field to the protocol - // We can therefore assume that if it's missing it's 0.10 or 0.11 - terraformVersion = "0.11+compatible" - } - return providerConfigure(d, terraformVersion) - } - - return provider -} - -var descriptions map[string]string -var endpointServiceNames []string - -func init() { - descriptions = map[string]string{ - "region": "The region where AWS operations will take place. Examples\n" + - "are us-east-1, us-west-2, etc.", // lintignore:AWSAT003 - - "access_key": "The access key for API operations. 
-		"access_key": "The access key for API operations. You can retrieve this\n" +
-			"from the 'Security & Credentials' section of the AWS console.",
-
-		"secret_key": "The secret key for API operations. You can retrieve this\n" +
-			"from the 'Security & Credentials' section of the AWS console.",
-
-		"profile": "The profile for API operations. If not set, the default profile\n" +
-			"created with `aws configure` will be used.",
-
-		"shared_credentials_file": "The path to the shared credentials file. If not set\n" +
-			"this defaults to ~/.aws/credentials.",
-
-		"token": "Session token for API operations. A session token is only required if you are\n" +
-			"using temporary security credentials.",
-
-		"max_retries": "The maximum number of times an AWS API request is\n" +
-			"retried on a recoverable failure. If the request still fails, an error is\n" +
-			"returned.",
-
-		"endpoint": "Use this to override the default service endpoint URL",
-
-		"insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted, " +
-			"the default value is `false`.",
-
-		"skip_credentials_validation": "Skip the credentials validation via STS API. " +
-			"Used for AWS API implementations that do not have STS available/implemented.",
-
-		"skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " +
-			"Used by users that don't have ec2:DescribeAccountAttributes permissions.",
-
-		"skip_region_validation": "Skip static validation of region name. " +
-			"Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).",
-
-		"skip_requesting_account_id": "Skip requesting the account ID. " +
-			"Used for AWS API implementations that do not have IAM/STS API and/or metadata API.",
-
-		"skip_metadata_api_check": "Skip the AWS Metadata API check. " +
-			"Used for AWS API implementations that do not have a metadata API endpoint.",
-
-		"s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" +
-			"i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" +
-			"use virtual hosted bucket addressing when possible\n" +
-			"(http://BUCKET.s3.amazonaws.com/KEY).
Specific to the Amazon S3 service.", - } - - endpointServiceNames = []string{ - "accessanalyzer", - "acm", - "acmpca", - "amplify", - "apigateway", - "appconfig", - "applicationautoscaling", - "applicationinsights", - "appmesh", - "apprunner", - "appstream", - "appsync", - "athena", - "auditmanager", - "autoscaling", - "autoscalingplans", - "backup", - "batch", - "budgets", - "chime", - "cloud9", - "cloudformation", - "cloudfront", - "cloudhsm", - "cloudsearch", - "cloudtrail", - "cloudwatch", - "cloudwatchevents", - "cloudwatchlogs", - "codeartifact", - "codebuild", - "codecommit", - "codedeploy", - "codepipeline", - "codestarconnections", - "cognitoidentity", - "cognitoidp", - "configservice", - "connect", - "cur", - "dataexchange", - "datapipeline", - "datasync", - "dax", - "detective", - "devicefarm", - "directconnect", - "dlm", - "dms", - "docdb", - "ds", - "dynamodb", - "ec2", - "ecr", - "ecrpublic", - "ecs", - "efs", - "eks", - "elasticache", - "elasticbeanstalk", - "elastictranscoder", - "elb", - "emr", - "emrcontainers", - "es", - "firehose", - "fms", - "forecast", - "fsx", - "gamelift", - "glacier", - "globalaccelerator", - "glue", - "greengrass", - "guardduty", - "iam", - "identitystore", - "imagebuilder", - "inspector", - "iot", - "iotanalytics", - "iotevents", - "kafka", - "kinesis", - "kinesisanalytics", - "kinesisanalyticsv2", - "kinesisvideo", - "kms", - "lakeformation", - "lambda", - "lexmodels", - "licensemanager", - "lightsail", - "location", - "macie", - "macie2", - "managedblockchain", - "marketplacecatalog", - "mediaconnect", - "mediaconvert", - "medialive", - "mediapackage", - "mediastore", - "mediastoredata", - "mq", - "mwaa", - "neptune", - "networkfirewall", - "networkmanager", - "opsworks", - "organizations", - "outposts", - "personalize", - "pinpoint", - "pricing", - "qldb", - "quicksight", - "ram", - "rds", - "redshift", - "resourcegroups", - "resourcegroupstaggingapi", - "route53", - "route53domains", - "route53resolver", - "s3", - "s3control", - "s3outposts", - "sagemaker", - "schemas", - "sdb", - "secretsmanager", - "securityhub", - "serverlessrepo", - "servicecatalog", - "servicediscovery", - "servicequotas", - "ses", - "shield", - "signer", - "sns", - "sqs", - "ssm", - "ssoadmin", - "stepfunctions", - "storagegateway", - "sts", - "swf", - "synthetics", - "timestreamwrite", - "transfer", - "waf", - "wafregional", - "wafv2", - "worklink", - "workmail", - "workspaces", - "xray", - } -} - -func providerConfigure(d *schema.ResourceData, terraformVersion string) (interface{}, error) { - config := Config{ - AccessKey: d.Get("access_key").(string), - SecretKey: d.Get("secret_key").(string), - Profile: d.Get("profile").(string), - Token: d.Get("token").(string), - Region: d.Get("region").(string), - CredsFilename: d.Get("shared_credentials_file").(string), - DefaultTagsConfig: expandProviderDefaultTags(d.Get("default_tags").([]interface{})), - Endpoints: make(map[string]string), - MaxRetries: d.Get("max_retries").(int), - IgnoreTagsConfig: expandProviderIgnoreTags(d.Get("ignore_tags").([]interface{})), - Insecure: d.Get("insecure").(bool), - SkipCredsValidation: d.Get("skip_credentials_validation").(bool), - SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool), - SkipRegionValidation: d.Get("skip_region_validation").(bool), - SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), - SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), - S3ForcePathStyle: d.Get("s3_force_path_style").(bool), - terraformVersion: terraformVersion, - } - - 
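
providerConfigure then lifts the optional assume_role block out of Terraform state. Because that block is declared as a TypeList with MaxItems: 1, it arrives as a []interface{} holding a single map[string]interface{}, and each key must be type-asserted defensively. The standalone sketch below shows that decode idiom in isolation; expandAssumeRole and its trimmed-down field set are illustrative, not the provider's actual helper:

    package main

    import "fmt"

    // Decode a MaxItems:1 block: guard the empty/nil cases, then
    // type-assert each key, since absent keys decode to nil.
    func expandAssumeRole(l []interface{}) (roleARN, sessionName string) {
        if len(l) == 0 || l[0] == nil {
            return "", ""
        }
        m := l[0].(map[string]interface{})
        if v, ok := m["role_arn"].(string); ok && v != "" {
            roleARN = v
        }
        if v, ok := m["session_name"].(string); ok && v != "" {
            sessionName = v
        }
        return roleARN, sessionName
    }

    func main() {
        block := []interface{}{map[string]interface{}{
            "role_arn":     "arn:aws:iam::123456789012:role/example",
            "session_name": "terraform",
        }}
        arn, name := expandAssumeRole(block)
        fmt.Println(arn, name)
    }
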
if l, ok := d.Get("assume_role").([]interface{}); ok && len(l) > 0 && l[0] != nil {
-		m := l[0].(map[string]interface{})
-
-		if v, ok := m["duration_seconds"].(int); ok && v != 0 {
-			config.AssumeRoleDurationSeconds = v
-		}
-
-		if v, ok := m["external_id"].(string); ok && v != "" {
-			config.AssumeRoleExternalID = v
-		}
-
-		if v, ok := m["policy"].(string); ok && v != "" {
-			config.AssumeRolePolicy = v
-		}
-
-		if policyARNSet, ok := m["policy_arns"].(*schema.Set); ok && policyARNSet.Len() > 0 {
-			for _, policyARNRaw := range policyARNSet.List() {
-				policyARN, ok := policyARNRaw.(string)
-
-				if !ok {
-					continue
-				}
-
-				config.AssumeRolePolicyARNs = append(config.AssumeRolePolicyARNs, policyARN)
-			}
-		}
-
-		if v, ok := m["role_arn"].(string); ok && v != "" {
-			config.AssumeRoleARN = v
-		}
-
-		if v, ok := m["session_name"].(string); ok && v != "" {
-			config.AssumeRoleSessionName = v
-		}
-
-		if tagMapRaw, ok := m["tags"].(map[string]interface{}); ok && len(tagMapRaw) > 0 {
-			config.AssumeRoleTags = make(map[string]string)
-
-			for k, vRaw := range tagMapRaw {
-				v, ok := vRaw.(string)
-
-				if !ok {
-					continue
-				}
-
-				config.AssumeRoleTags[k] = v
-			}
-		}
-
-		if transitiveTagKeySet, ok := m["transitive_tag_keys"].(*schema.Set); ok && transitiveTagKeySet.Len() > 0 {
-			for _, transitiveTagKeyRaw := range transitiveTagKeySet.List() {
-				transitiveTagKey, ok := transitiveTagKeyRaw.(string)
-
-				if !ok {
-					continue
-				}
-
-				config.AssumeRoleTransitiveTagKeys = append(config.AssumeRoleTransitiveTagKeys, transitiveTagKey)
-			}
-		}
-
-		log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q)", config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID)
-	}
-
-	endpointsSet := d.Get("endpoints").(*schema.Set)
-
-	for _, endpointsSetI := range endpointsSet.List() {
-		endpoints := endpointsSetI.(map[string]interface{})
-		for _, endpointServiceName := range endpointServiceNames {
-			config.Endpoints[endpointServiceName] = endpoints[endpointServiceName].(string)
-		}
-	}
-
-	if v, ok := d.GetOk("allowed_account_ids"); ok {
-		for _, accountIDRaw := range v.(*schema.Set).List() {
-			config.AllowedAccountIds = append(config.AllowedAccountIds, accountIDRaw.(string))
-		}
-	}
-
-	if v, ok := d.GetOk("forbidden_account_ids"); ok {
-		for _, accountIDRaw := range v.(*schema.Set).List() {
-			config.ForbiddenAccountIds = append(config.ForbiddenAccountIds, accountIDRaw.(string))
-		}
-	}
-
-	return config.Client()
-}
-
-// This is a global MutexKV for use within this plugin.
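
The variable declared next is the provider's global MutexKV, a map of named mutexes that the plugin SDK ships for serializing CRUD operations which would otherwise race on the same remote object. A minimal usage sketch; the mutexkv helper and its Lock/Unlock-by-key API are real SDK v1 pieces, while the surrounding function and key choice are illustrative:

    package aws

    import "github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv"

    var exampleMutexKV = mutexkv.NewMutexKV()

    // Serialize updates that target the same bucket while still letting
    // operations on unrelated buckets proceed concurrently.
    func updateBucketSerialized(bucket string, update func() error) error {
        exampleMutexKV.Lock(bucket)
        defer exampleMutexKV.Unlock(bucket)
        return update()
    }
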
-var awsMutexKV = mutexkv.NewMutexKV() - -func assumeRoleSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "duration_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: "Seconds to restrict the assume role session duration.", - }, - "external_id": { - Type: schema.TypeString, - Optional: true, - Description: "Unique identifier that might be required for assuming a role in another account.", - }, - "policy": { - Type: schema.TypeString, - Optional: true, - Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", - ValidateFunc: validation.StringIsJSON, - }, - "policy_arns": { - Type: schema.TypeSet, - Optional: true, - Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, - }, - }, - "role_arn": { - Type: schema.TypeString, - Optional: true, - Description: "Amazon Resource Name of an IAM Role to assume prior to making API calls.", - ValidateFunc: validateArn, - }, - "session_name": { - Type: schema.TypeString, - Optional: true, - Description: "Identifier for the assumed role session.", - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - Description: "Assume role session tags.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "transitive_tag_keys": { - Type: schema.TypeSet, - Optional: true, - Description: "Assume role session tag keys to pass to any subsequent sessions.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - } -} - -func endpointsSchema() *schema.Schema { - endpointsAttributes := make(map[string]*schema.Schema) - - for _, endpointServiceName := range endpointServiceNames { - endpointsAttributes[endpointServiceName] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["endpoint"], - } - } - - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: endpointsAttributes, - }, - } -} - -func expandProviderDefaultTags(l []interface{}) *keyvaluetags.DefaultConfig { - if len(l) == 0 || l[0] == nil { - return nil - } - - defaultConfig := &keyvaluetags.DefaultConfig{} - m := l[0].(map[string]interface{}) - - if v, ok := m["tags"].(map[string]interface{}); ok { - defaultConfig.Tags = keyvaluetags.New(v) - } - return defaultConfig -} - -func expandProviderIgnoreTags(l []interface{}) *keyvaluetags.IgnoreConfig { - if len(l) == 0 || l[0] == nil { - return nil - } - - ignoreConfig := &keyvaluetags.IgnoreConfig{} - m := l[0].(map[string]interface{}) - - if v, ok := m["keys"].(*schema.Set); ok { - ignoreConfig.Keys = keyvaluetags.New(v.List()) - } - - if v, ok := m["key_prefixes"].(*schema.Set); ok { - ignoreConfig.KeyPrefixes = keyvaluetags.New(v.List()) - } - - return ignoreConfig -} - -// ReverseDns switches a DNS hostname to reverse DNS and vice-versa. 
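
A concrete input/output pair makes ReverseDns easier to read: it splits the hostname on dots, reverses the labels in place, and rejoins them. The standalone sketch below reproduces the same algorithm so it can be run directly:

    package main

    import (
        "fmt"
        "strings"
    )

    // Same algorithm as ReverseDns below: split, reverse in place, rejoin.
    func reverseDNS(hostname string) string {
        parts := strings.Split(hostname, ".")
        for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
            parts[i], parts[j] = parts[j], parts[i]
        }
        return strings.Join(parts, ".")
    }

    func main() {
        fmt.Println(reverseDNS("iot.us-west-2.amazonaws.com"))
        // Prints: com.amazonaws.us-west-2.iot
    }
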
-func ReverseDns(hostname string) string {
-	parts := strings.Split(hostname, ".")
-
-	for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
-		parts[i], parts[j] = parts[j], parts[i]
-	}
-
-	return strings.Join(parts, ".")
-}

diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go
index 4c6937093410..792c5e4dc3ba 100644
--- a/internal/service/s3/bucket_replication_configuration.go
+++ b/internal/service/s3/bucket_replication_configuration.go
@@ -13,9 +13,9 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
-	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
-	"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3/waiter"
-	"github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource"
+	"github.com/terraform-providers/terraform-provider-aws/internal/keyvaluetags"
+	"github.com/terraform-providers/terraform-provider-aws/internal/service/s3/waiter"
+	"github.com/terraform-providers/terraform-provider-aws/internal/tfresource"
 )
 
 func ResourceBucketReplicationConfiguration() *schema.Resource {

From a740404b901a7951e1d8e06fc7ccbda7dceb5992 Mon Sep 17 00:00:00 2001
From: Dave Kujawski
Date: Wed, 3 Nov 2021 14:39:30 -0700
Subject: [PATCH 092/304] tracking changes from upstream

---
 .../s3/bucket_replication_configuration.go    |  41 +--
 .../bucket_replication_configuration_test.go  | 245 +++++++++---------
 2 files changed, 147 insertions(+), 139 deletions(-)

diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go
index 792c5e4dc3ba..d41c3dc37c58 100644
--- a/internal/service/s3/bucket_replication_configuration.go
+++ b/internal/service/s3/bucket_replication_configuration.go
@@ -13,9 +13,10 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
-	"github.com/terraform-providers/terraform-provider-aws/internal/keyvaluetags"
-	"github.com/terraform-providers/terraform-provider-aws/internal/service/s3/waiter"
-	"github.com/terraform-providers/terraform-provider-aws/internal/tfresource"
+	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+	"github.com/hashicorp/terraform-provider-aws/internal/verify"
 )
 
 func ResourceBucketReplicationConfiguration() *schema.Resource {
@@ -61,12 +62,12 @@ func ResourceBucketReplicationConfiguration() *schema.Resource {
 						"account_id": {
 							Type:         schema.TypeString,
 							Optional:     true,
-							ValidateFunc: validateAwsAccountId,
+							ValidateFunc: verify.ValidAccountID,
 						},
 						"bucket": {
 							Type:         schema.TypeString,
 							Required:     true,
-							ValidateFunc: validateArn,
+							ValidateFunc: verify.ValidARN,
 						},
 						"storage_class": {
 							Type:         schema.TypeString,
@@ -221,7 +222,7 @@ func ResourceBucketReplicationConfiguration() *schema.Resource {
 							Optional:     true,
 							ValidateFunc: validation.StringLenBetween(0, 1024),
 						},
-						"tags": tagsSchema(),
+						"tags": tftags.TagsSchema(),
 					},
 				},
 			},
@@ -293,10 +294,10 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met
 		input.Bucket = bucket
 	}
 
-	s3conn := meta.(*AWSClient).s3conn
+	conn := meta.(*conns.AWSClient).S3Conn
-
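
The Read function that follows polls HeadBucket inside resource.Retry so that a freshly created bucket has time to become visible before its replication configuration is fetched. The important part is the retryable/non-retryable split; a minimal sketch of that idiom with a stand-in probe (waitUntilExists, checkOnce, and errNotFound are illustrative, the resource.Retry API is the SDK v2 helper used here):

    package main

    import (
        "errors"
        "time"

        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
    )

    var errNotFound = errors.New("not found")

    // checkOnce stands in for a HeadBucket-style existence probe.
    func waitUntilExists(checkOnce func() error, timeout time.Duration) error {
        return resource.Retry(timeout, func() *resource.RetryError {
            if err := checkOnce(); errors.Is(err, errNotFound) {
                return resource.RetryableError(err) // keep polling
            } else if err != nil {
                return resource.NonRetryableError(err) // give up immediately
            }
            return nil
        })
    }
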
err := resource.Retry(waiter.BucketCreatedTimeout, func() *resource.RetryError { - _, err := s3conn.HeadBucket(input) + err := resource.Retry(bucketCreatedTimeout, func() *resource.RetryError { + _, err := conn.HeadBucket(input) if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { return resource.RetryableError(err) @@ -314,7 +315,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met }) if tfresource.TimedOut(err) { - _, err = s3conn.HeadBucket(input) + _, err = conn.HeadBucket(input) } if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { @@ -332,12 +333,12 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } // Read the bucket replication configuration - replicationResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + replicationResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketReplication(&s3.GetBucketReplicationInput{ Bucket: bucket, }) }) - if err != nil && !isAWSErr(err, "ReplicationConfigurationNotFoundError", "") { + if err != nil && !tfawserr.ErrMessageContains(err, "ReplicationConfigurationNotFoundError", "") { return fmt.Errorf("error getting S3 Bucket replication: %s", err) } replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) @@ -433,11 +434,11 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met m["prefix"] = aws.StringValue(f.Prefix) } if t := f.Tag; t != nil { - m["tags"] = keyvaluetags.S3KeyValueTags([]*s3.Tag{t}).IgnoreAws().Map() + m["tags"] = KeyValueTags([]*s3.Tag{t}).IgnoreAWS().Map() } if a := f.And; a != nil { m["prefix"] = aws.StringValue(a.Prefix) - m["tags"] = keyvaluetags.S3KeyValueTags(a.Tags).IgnoreAws().Map() + m["tags"] = KeyValueTags(a.Tags).IgnoreAWS().Map() } t["filter"] = []interface{}{m} @@ -456,7 +457,7 @@ func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, met } func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn + s3conn := meta.(*conns.AWSClient).S3Conn bucket := d.Get("bucket").(string) rc := &s3.ReplicationConfiguration{} @@ -575,7 +576,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) rcRule.Filter = &s3.ReplicationRuleFilter{} filter := f[0].(map[string]interface{}) - tags := keyvaluetags.New(filter["tags"]).IgnoreAws().S3Tags() + tags := Tags(tftags.New(filter["tags"]).IgnoreAWS()) if len(tags) > 0 { rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ Prefix: aws.String(filter["prefix"].(string)), @@ -609,7 +610,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m err := resource.Retry(1*time.Minute, func() *resource.RetryError { _, err := s3conn.PutBucketReplication(i) - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { return resource.RetryableError(err) } if err != nil { @@ -617,7 +618,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } return nil }) - if isResourceTimeoutError(err) { + if 
tfresource.TimedOut(err) { _, err = s3conn.PutBucketReplication(i) } if err != nil { @@ -628,7 +629,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn + s3conn := meta.(*conns.AWSClient).S3Conn bucket := d.Get("bucket").(string) log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 28bc9f8e5024..5947bdc27cce 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1,4 +1,4 @@ -package s3 +package s3_test import ( "fmt" @@ -9,15 +9,18 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -26,19 +29,19 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -59,7 +62,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -80,7 +83,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + 
testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -109,7 +112,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -117,12 +120,12 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: testAccErrorCheckSkipS3(t), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), @@ -169,7 +172,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test } func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -177,12 +180,12 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: testAccErrorCheckSkipS3(t), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), @@ -233,7 +236,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -241,12 +244,12 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, ErrorCheck: testAccErrorCheckSkipS3(t), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: 
acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), @@ -285,8 +288,8 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -295,19 +298,19 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -339,7 +342,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -373,8 +376,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -383,19 +386,19 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), 
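
Every acceptance test in this file follows the same step shape: Config applies a rendered Terraform configuration, then Check asserts against the resulting state. Stripped to its skeleton (provider wiring omitted, names hypothetical, so this sketch will not run against AWS as-is), the pattern looks like this:

    package example_test

    import (
        "testing"

        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
    )

    func TestAccExample_basic(t *testing.T) {
        const testAccExampleConfig = `resource "null_resource" "test" {}`
        resource.ParallelTest(t, resource.TestCase{
            // A real TestCase also sets PreCheck, ErrorCheck,
            // ProviderFactories, and CheckDestroy, as above.
            Steps: []resource.TestStep{
                {
                    Config: testAccExampleConfig, // apply a configuration
                    Check: resource.ComposeTestCheckFunc( // then assert state
                        resource.TestCheckResourceAttrSet("null_resource.test", "id"),
                    ),
                },
            },
        })
    }
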
Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -424,7 +427,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -449,8 +452,8 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo } func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -459,19 +462,19 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigRTC(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -509,8 +512,8 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -519,19 +522,19 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), Check: resource.ComposeTestCheckFunc( 
resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -563,7 +566,7 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { - rInt := acctest.RandInt() + rInt := sdkacctest.RandInt() resourceName := "aws_s3_bucket_replication_configuration.replication" // record the initialized providers so that we can use them to check for the instances in each region @@ -571,12 +574,12 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -594,8 +597,8 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { } func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -604,19 +607,19 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -643,7 +646,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -677,7 +680,7 @@ func 
TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -712,7 +715,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -751,7 +754,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -795,28 +798,31 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := acctest.RandInt() - rName := acctest.RandomWithPrefix("tf-acc-test") - rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix("tf-acc-test") + rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3BucketDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { ID: aws.String("testid"), Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), StorageClass: aws.String(s3.ObjectStorageClassStandard), }, Status: aws.String(s3.ReplicationRuleStatusEnabled), @@ -854,28 +860,31 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) return } resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := acctest.RandInt() - rName := acctest.RandomWithPrefix("tf-acc-test") - rNameDestination := acctest.RandomWithPrefix("tf-acc-test") + rInt := sdkacctest.RandInt() + rName := 
sdkacctest.RandomWithPrefix("tf-acc-test") + rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3BucketDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), + acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { ID: aws.String("testid"), Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", testAccGetPartition(), rNameDestination)), + Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), StorageClass: aws.String(s3.ObjectStorageClassStandard), }, Status: aws.String(s3.ReplicationRuleStatusEnabled), @@ -907,8 +916,8 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) } func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { - rInt := acctest.RandInt() - partition := testAccGetPartition() + rInt := sdkacctest.RandInt() + partition := acctest.Partition() iamRoleResourceName := "aws_iam_role.role" resourceName := "aws_s3_bucket_replication_configuration.replication" @@ -927,19 +936,19 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - testAccPreCheck(t) - testAccMultipleRegionPreCheck(t, 2) + acctest.PreCheck(t) + acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: testAccErrorCheck(t, s3.EndpointsID), - ProviderFactories: testAccProviderFactoriesAlternate(&providers), - CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckAWSS3BucketReplicationRules( + testAccCheckBucketReplicationRules( resourceName, []*s3.ReplicationRule{ { @@ -968,7 +977,7 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { }) } -func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { +func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] for _, rule := range rules { @@ -999,17 
+1008,15 @@ func testAccCheckAWSS3BucketReplicationRules(n string, rules []*s3.ReplicationRu } } - conn := testAccProvider.Meta().(*AWSClient).s3conn + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ Bucket: aws.String(rs.Primary.ID), }) - if err != nil { - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { - return fmt.Errorf("S3 bucket not found") - } - if rules == nil { - return nil - } + if err != nil && tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") { + return fmt.Errorf("S3 bucket not found") + } else if err != nil && rules == nil { + return nil + } else if err != nil { return fmt.Errorf("GetReplicationConfiguration error: %v", err) } @@ -1156,7 +1163,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(randInt int) string { - return composeConfig( + return acctest.ConfigCompose( testAccAWSS3BucketReplicationConfigBasic(randInt), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { @@ -1230,7 +1237,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(randInt int) string { - return composeConfig( + return acctest.ConfigCompose( testAccAWSS3BucketReplicationConfigBasic(randInt), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { @@ -1315,7 +1322,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } func testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(randInt int) string { - return composeConfig( + return acctest.ConfigCompose( testAccAWSS3BucketReplicationConfigBasic(randInt), fmt.Sprintf(` resource "aws_s3_bucket" "destination2" { From 0ba4166c3c4254a3ad01d42c4caf9f35e9016dec Mon Sep 17 00:00:00 2001 From: Dave Kujawski Date: Sat, 6 Nov 2021 09:09:29 -0700 Subject: [PATCH 093/304] update tests to track V2 changes --- .../bucket_replication_configuration_test.go | 392 ++++-------------- 1 file changed, 90 insertions(+), 302 deletions(-) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 5947bdc27cce..5575cee8b943 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -6,6 +6,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -16,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { @@ -34,7 +36,7 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), @@ -125,7 +127,7 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test }, ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: 
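// FactoriesAlternate supplies factories for both the default "aws"
// provider and an "awsalternate" provider pointed at a second region,
// recording each initialized instance into `providers`; that is what
// lets a single test create the source bucket in one region and the
// replication destination in another, with the PreCheckMultipleRegion
// call above guarding that two usable regions are actually configured.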
acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), @@ -185,12 +187,12 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t }, ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), + resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ "id": "rule1", "priority": "1", @@ -205,22 +207,10 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t "priority": "2", "status": "Enabled", "filter.#": "1", - "filter.0.tags.%": "1", - "filter.0.tags.Key2": "Value2", + "filter.0.prefix": "prefix2", "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ - "id": "rule3", - "priority": "3", - "status": "Disabled", - "filter.#": "1", - "filter.0.prefix": "prefix3", - "filter.0.tags.%": "1", - "filter.0.tags.Key3": "Value3", - "destination.#": "1", - "destination.0.storage_class": "ONEZONE_IA", - }), ), }, { @@ -249,7 +239,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { }, ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), @@ -269,8 +259,7 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { "priority": "2", "status": "Enabled", "filter.#": "1", - "filter.0.tags.%": "1", - "filter.0.tags.Key2": "Value2", + "filter.0.prefix": "prefix1", "destination.#": "1", "destination.0.storage_class": "STANDARD_IA", }), @@ -303,7 +292,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), @@ -391,7 +380,7 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: 
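// The fixtures in this file pair each `filter` block with an explicit
// `delete_marker_replication` block: under the V2 S3 replication
// configuration schema, a rule that specifies a Filter must also state
// DeleteMarkerReplication explicitly. A minimal sketch of the rule
// shape, with illustrative values:
//
//   rules {
//     id     = "rule1"
//     status = "Enabled"
//
//     filter {
//       prefix = "prefix1"
//     }
//
//     delete_marker_replication {
//       status = "Enabled"
//     }
//
//     destination {
//       bucket = aws_s3_bucket.destination.arn
//     }
//   }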
[]resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), @@ -467,7 +456,7 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigRTC(rInt), @@ -496,7 +485,7 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { }, }, DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), }, Filter: &s3.ReplicationRuleFilter{ Prefix: aws.String("foo"), @@ -527,7 +516,7 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), @@ -544,7 +533,7 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), }, DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), }, Filter: &s3.ReplicationRuleFilter{ Prefix: aws.String("foo"), @@ -579,7 +568,7 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), @@ -612,35 +601,8 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Priority: aws.Int64(0), - 
DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, { Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), Check: resource.ComposeTestCheckFunc( @@ -675,123 +637,6 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - And: &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(""), - Tags: []*s3.Tag{ - { - Key: aws.String("ReplicateMe"), - Value: aws.String("Yes"), - }, - }, - }, - }, - Priority: aws.Int64(42), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - And: &s3.ReplicationRuleAndOperator{ - Prefix: aws.String("foo"), - Tags: []*s3.Tag{ - { - Key: aws.String("ReplicateMe"), - Value: aws.String("Yes"), - }, - { - Key: aws.String("AnotherTag"), - Value: aws.String("OK"), - }, - }, - }, - }, - Priority: aws.Int64(41), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, - { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - And: &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(""), - Tags: []*s3.Tag{ - { - Key: aws.String("ReplicateMe"), - Value: aws.String("Yes"), - }, - { - Key: aws.String("AnotherTag"), - Value: aws.String("OK"), - }, - { - Key: aws.String("Foo"), - Value: aws.String("Bar"), - }, - }, - }, - }, - Priority: 
aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), - }, - }, - }, - ), - ), - }, }, }) } @@ -809,7 +654,7 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), @@ -871,7 +716,7 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), @@ -941,7 +786,7 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), - CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), @@ -1038,6 +883,39 @@ func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) r } } +func testAccCheckReplicationConfigDestroy(s *terraform.State, provider *schema.Provider) error { + conn := provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket_replication_configuration" { + continue + } + input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.GetBucketReplication(input) + + if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "NotFound", "") { + return nil + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("AWS S3 Bucket Replication Configuration still exists: %s", rs.Primary.ID)) + }) + + if tfresource.TimedOut(err) { + _, err = conn.GetBucketReplication(input) + } + + if err != nil { + return err + } + } + return nil +} + func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -1119,6 +997,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "foo" } status = "Enabled" + delete_marker_replication { + status = "Enabled" + } destination { bucket = aws_s3_bucket.destination.arn replication_time { @@ -1154,6 +1035,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" } } + delete_marker_replication { + status = "Enabled" + } + status = "Enabled" destination { bucket = aws_s3_bucket.destination.arn @@ -1201,6 +1086,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { filter {} + 
delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination.arn storage_class = "STANDARD" @@ -1214,6 +1103,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { filter {} + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination2.arn storage_class = "STANDARD_IA" @@ -1227,6 +1120,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { filter {} + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination3.arn storage_class = "ONEZONE_IA" @@ -1277,6 +1174,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "prefix1" } + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination.arn storage_class = "STANDARD" @@ -1289,9 +1190,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - tags = { - Key2 = "Value2" - } + prefix = "prefix2" + } + + delete_marker_replication { + status = "Enabled" } destination { @@ -1300,24 +1203,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { } } - rules { - id = "rule3" - priority = 3 - status = "Disabled" - - filter { - prefix = "prefix3" - - tags = { - Key3 = "Value3" - } - } - - destination { - bucket = aws_s3_bucket.destination3.arn - storage_class = "ONEZONE_IA" - } - } }`, randInt)) } @@ -1350,6 +1235,10 @@ resource "aws_s3_bucket_replication_configuration" "replication" { prefix = "prefix1" } + delete_marker_replication { + status = "Enabled" + } + destination { bucket = aws_s3_bucket.destination.arn storage_class = "STANDARD" @@ -1362,9 +1251,11 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - tags = { - Key2 = "Value2" - } + prefix = "prefix1" + } + + delete_marker_replication { + status = "Enabled" } destination { @@ -1512,28 +1403,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationDeleteMarkerReplicationDisabled(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - filter { - prefix = "foo" - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - func testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(randInt int) string { return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` resource "aws_s3_bucket_replication_configuration" "replication" { @@ -1560,87 +1429,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { }` } -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationOnlyOneTag(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - priority = 42 - - filter { - tags = { - ReplicateMe = "Yes" - } - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationPrefixAndTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource 
"aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - priority = 41 - - filter { - prefix = "foo" - - tags = { - AnotherTag = "OK" - ReplicateMe = "Yes" - } - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - -func testAccAWSS3BucketReplicationConfigWithV2ConfigurationMultipleTags(randInt int) string { - return testAccAWSS3BucketReplicationConfigBasic(randInt) + ` -resource "aws_s3_bucket_replication_configuration" "replication" { - bucket = aws_s3_bucket.source.id - role = aws_iam_role.role.arn - - rules { - id = "foobar" - status = "Enabled" - - filter { - tags = { - AnotherTag = "OK" - Foo = "Bar" - ReplicateMe = "Yes" - } - } - - destination { - bucket = aws_s3_bucket.destination.arn - storage_class = "STANDARD" - } - } -}` -} - func testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination string, rInt int) string { return fmt.Sprintf(` resource "aws_iam_role" "test" { From 25958a6f323a156ccc047112e718a666ac6fad30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edgar=20L=C3=B3pez?= Date: Mon, 8 Nov 2021 19:57:36 -0700 Subject: [PATCH 094/304] refactor --- .../service/appstream/directory_config.go | 6 +-- .../appstream/directory_config_test.go | 38 ++++++++++++++++--- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/internal/service/appstream/directory_config.go b/internal/service/appstream/directory_config.go index eb7f1aaab548..9b02e4248721 100644 --- a/internal/service/appstream/directory_config.go +++ b/internal/service/appstream/directory_config.go @@ -76,10 +76,9 @@ func resourceDirectoryConfigCreate(ctx context.Context, d *schema.ResourceData, ServiceAccountCredentials: expandServiceAccountCredentials(d.Get("service_account_credentials").([]interface{})), } - var err error var output *appstream.CreateDirectoryConfigOutput - err = resource.RetryContext(ctx, directoryConfigTimeout, func() *resource.RetryError { - output, err = conn.CreateDirectoryConfigWithContext(ctx, input) + err := resource.RetryContext(ctx, directoryConfigTimeout, func() *resource.RetryError { + out, err := conn.CreateDirectoryConfigWithContext(ctx, input) if err != nil { if tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { return resource.RetryableError(err) @@ -87,6 +86,7 @@ func resourceDirectoryConfigCreate(ctx context.Context, d *schema.ResourceData, return resource.NonRetryableError(err) } + output = out return nil }) diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index d3acdc2ba40f..8581ab215d69 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -39,6 +39,9 @@ func TestAccAppStreamDirectoryConfig_basic(t *testing.T) { testAccCheckDirectoryConfigExists(resourceName, &directoryOutput), resource.TestCheckResourceAttr(resourceName, "directory_name", rName), acctest.CheckResourceAttrRFC3339(resourceName, "created_time"), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_name", rUserName), + resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_password", rPassword), ), }, { @@ -46,6 +49,10 @@ func TestAccAppStreamDirectoryConfig_basic(t *testing.T) { Check: 
resource.ComposeTestCheckFunc( testAccCheckDirectoryConfigExists(resourceName, &directoryOutput), resource.TestCheckResourceAttr(resourceName, "directory_name", rName), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_name", rUserNameUpdated), + resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_password", rPasswordUpdated), + acctest.CheckResourceAttrRFC3339(resourceName, "created_time"), ), }, @@ -138,21 +145,40 @@ func testAccCheckDirectoryConfigDestroy(s *terraform.State) error { } func testAccDirectoryConfigConfig(name, userName, password string) string { - return fmt.Sprintf(` -data "aws_organizations_organization" "test" {} + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = "10.0.${count.index}.0/24" + vpc_id = aws_vpc.test.id +} + +resource "aws_directory_service_directory" "test" { + name = %[1]q + password = %[3]q + edition = "Standard" + type = "MicrosoftAD" -data "aws_organizations_organizational_units" "test" { - parent_id = data.aws_organizations_organization.test.roots[0].id + vpc_settings { + vpc_id = aws_vpc.test.id + subnet_ids = aws_subnet.test.*.id + } } resource "aws_appstream_directory_config" "test" { directory_name = %[1]q - organizational_unit_distinguished_names = data.aws_organizations_organizational_units.test.children.*.id + organizational_unit_distinguished_names = [aws_directory_service_directory.test.id] service_account_credentials{ account_name = %[2]q account_password = %[3]q } } -`, name, userName, password) +`, name, userName, password)) } From 87f073179ef14a6a5c267b6a6afcb1bcb5e21c46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edgar=20L=C3=B3pez?= Date: Mon, 8 Nov 2021 20:25:58 -0700 Subject: [PATCH 095/304] fixes linter --- internal/service/appstream/directory_config_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index 8581ab215d69..2b32cbb16c48 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -174,8 +174,8 @@ resource "aws_directory_service_directory" "test" { resource "aws_appstream_directory_config" "test" { directory_name = %[1]q organizational_unit_distinguished_names = [aws_directory_service_directory.test.id] - - service_account_credentials{ + + service_account_credentials { account_name = %[2]q account_password = %[3]q } From cf240833f0ccc67e9b48a2b1465808e9d4d85735 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edgar=20L=C3=B3pez?= Date: Tue, 9 Nov 2021 10:01:58 -0700 Subject: [PATCH 096/304] splat expression --- internal/service/appstream/directory_config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index 2b32cbb16c48..9c60e984a8ac 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -167,7 +167,7 @@ resource "aws_directory_service_directory" "test" { vpc_settings { vpc_id = aws_vpc.test.id - subnet_ids = aws_subnet.test.*.id + subnet_ids = 
aws_subnet.test[*].id } } From be1a0e805996bbc0aa2da0217e3a9e887603a3d3 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Wed, 10 Nov 2021 00:38:13 +0200 Subject: [PATCH 097/304] emr cluster add `auto_termination_policy` --- internal/service/emr/cluster.go | 203 ++++++++++++++------ internal/service/emr/cluster_test.go | 181 ++++++++++++----- internal/service/emr/instance_group_test.go | 2 +- 3 files changed, 278 insertions(+), 108 deletions(-) diff --git a/internal/service/emr/cluster.go b/internal/service/emr/cluster.go index f361f47d417d..c3544b20ffbe 100644 --- a/internal/service/emr/cluster.go +++ b/internal/service/emr/cluster.go @@ -45,6 +45,20 @@ func ResourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "auto_termination_policy": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "idle_timeout": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(60, 604800), + }, + }, + }, + }, "name": { Type: schema.TypeString, ForceNew: true, @@ -407,15 +421,10 @@ func ResourceCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "action_on_failure": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - emr.ActionOnFailureCancelAndWait, - emr.ActionOnFailureContinue, - emr.ActionOnFailureTerminateCluster, - emr.ActionOnFailureTerminateJobFlow, - }, false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(emr.ActionOnFailure_Values(), false), }, "hadoop_jar_step": { Type: schema.TypeList, @@ -484,14 +493,11 @@ func ResourceCluster() *schema.Resource { Required: true, }, "scale_down_behavior": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - emr.ScaleDownBehaviorTerminateAtInstanceHour, - emr.ScaleDownBehaviorTerminateAtTaskCompletion, - }, false), + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(emr.ScaleDownBehavior_Values(), false), }, "security_configuration": { Type: schema.TypeString, @@ -772,7 +778,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { var autoScalingPolicy *emr.AutoScalingPolicy if err := json.Unmarshal([]byte(v.(string)), &autoScalingPolicy); err != nil { - return fmt.Errorf("error parsing core_instance_group Auto Scaling Policy JSON: %s", err) + return fmt.Errorf("error parsing core_instance_group Auto Scaling Policy JSON: %w", err) } instanceGroup.AutoScalingPolicy = autoScalingPolicy @@ -922,7 +928,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { } params.Configurations, err = expandConfigurationJson(info) if err != nil { - return fmt.Errorf("Error reading EMR configurations_json: %s", err) + return fmt.Errorf("Error reading EMR configurations_json: %w", err) } } @@ -931,6 +937,9 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { kerberosAttributesMap := kerberosAttributesList[0].(map[string]interface{}) params.KerberosAttributes = expandEmrKerberosAttributes(kerberosAttributesMap) } + if v, ok := d.GetOk("auto_termination_policy"); ok && len(v.([]interface{})) > 0 { + params.AutoTerminationPolicy = expandAutoTerminationPolicy(v.([]interface{})) + } log.Printf("[DEBUG] EMR Cluster create options: %s", params) @@ -953,7 +962,7 @@ func 
resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { resp, err = conn.RunJobFlow(params) } if err != nil { - return fmt.Errorf("error running EMR Job Flow: %s", err) + return fmt.Errorf("error running EMR Job Flow: %w", err) } d.SetId(aws.StringValue(resp.JobFlowId)) @@ -979,7 +988,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { clusterRaw, err := stateConf.WaitForState() if err != nil { - return fmt.Errorf("Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\": %s", err) + return fmt.Errorf("Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\": %w", err) } // For multiple master nodes, EMR automatically enables @@ -1025,7 +1034,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.SetId("") return nil } - return fmt.Errorf("Error reading EMR cluster: %s", err) + return fmt.Errorf("Error reading EMR cluster: %w", err) } if resp.Cluster == nil { @@ -1060,15 +1069,15 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { flattenedCoreInstanceGroup, err := flattenEmrCoreInstanceGroup(coreGroup) if err != nil { - return fmt.Errorf("error flattening core_instance_group: %s", err) + return fmt.Errorf("error flattening core_instance_group: %w", err) } if err := d.Set("core_instance_group", flattenedCoreInstanceGroup); err != nil { - return fmt.Errorf("error setting core_instance_group: %s", err) + return fmt.Errorf("error setting core_instance_group: %w", err) } if err := d.Set("master_instance_group", flattenEmrMasterInstanceGroup(masterGroup)); err != nil { - return fmt.Errorf("error setting master_instance_group: %s", err) + return fmt.Errorf("error setting master_instance_group: %w", err) } } @@ -1081,12 +1090,12 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { flattenedCoreInstanceFleet := flattenInstanceFleet(coreFleet) if err := d.Set("core_instance_fleet", flattenedCoreInstanceFleet); err != nil { - return fmt.Errorf("error setting core_instance_fleet: %s", err) + return fmt.Errorf("error setting core_instance_fleet: %w", err) } flattenedMasterInstanceFleet := flattenInstanceFleet(masterFleet) if err := d.Set("master_instance_fleet", flattenedMasterInstanceFleet); err != nil { - return fmt.Errorf("error setting master_instance_fleet: %s", err) + return fmt.Errorf("error setting master_instance_fleet: %w", err) } } @@ -1121,36 +1130,36 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { } if err := d.Set("applications", flattenApplications(cluster.Applications)); err != nil { - return fmt.Errorf("error setting EMR Applications for cluster (%s): %s", d.Id(), err) + return fmt.Errorf("error setting EMR Applications for cluster (%s): %w", d.Id(), err) } if _, ok := d.GetOk("configurations_json"); ok { configOut, err := flattenConfigurationJson(cluster.Configurations) if err != nil { - return fmt.Errorf("Error reading EMR cluster configurations: %s", err) + return fmt.Errorf("Error reading EMR cluster configurations: %w", err) } if err := d.Set("configurations_json", configOut); err != nil { - return fmt.Errorf("Error setting EMR configurations_json for cluster (%s): %s", d.Id(), err) + return fmt.Errorf("Error setting EMR configurations_json for cluster (%s): %w", d.Id(), err) } } if err := d.Set("ec2_attributes", flattenEc2Attributes(cluster.Ec2InstanceAttributes)); err != nil { - return fmt.Errorf("error setting EMR Ec2 Attributes: %s", err) + return fmt.Errorf("error setting EMR Ec2 Attributes: %w", 
err) } if err := d.Set("kerberos_attributes", flattenEmrKerberosAttributes(d, cluster.KerberosAttributes)); err != nil { - return fmt.Errorf("error setting kerberos_attributes: %s", err) + return fmt.Errorf("error setting kerberos_attributes: %w", err) } respBootstraps, err := conn.ListBootstrapActions(&emr.ListBootstrapActionsInput{ ClusterId: cluster.Id, }) if err != nil { - return fmt.Errorf("error listing bootstrap actions: %s", err) + return fmt.Errorf("error listing bootstrap actions: %w", err) } if err := d.Set("bootstrap_action", flattenBootstrapArguments(respBootstraps.BootstrapActions)); err != nil { - return fmt.Errorf("error setting Bootstrap Actions: %s", err) + return fmt.Errorf("error setting Bootstrap Actions: %w", err) } var stepSummaries []*emr.StepSummary @@ -1165,10 +1174,10 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { return !lastPage }) if err != nil { - return fmt.Errorf("error listing steps: %s", err) + return fmt.Errorf("error listing steps: %w", err) } if err := d.Set("step", flattenEmrStepSummaries(stepSummaries)); err != nil { - return fmt.Errorf("error setting step: %s", err) + return fmt.Errorf("error setting step: %w", err) } // AWS provides no other way to read back the additional_info @@ -1180,6 +1189,21 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("additional_info", info) } + termPolInput := &emr.GetAutoTerminationPolicyInput{ + ClusterId: aws.String(d.Id()), + } + + atpOut, err := conn.GetAutoTerminationPolicy(termPolInput) + if err != nil { + if !tfawserr.ErrMessageContains(err, "ValidationException", "Auto-termination is not available for this account when using this release of EMR") { + return fmt.Errorf("error getting auto termination policy: %w", err) + } + } + + if err := d.Set("auto_termination_policy", flattenAutoTerminationPolicy(atpOut.AutoTerminationPolicy)); err != nil { + return fmt.Errorf("error setting auto_termination_policy: %w", err) + } + return nil } @@ -1197,6 +1221,32 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("auto_termination_policy") { + _, n := d.GetChange("auto_termination_policy") + if len(n.([]interface{})) > 0 { + log.Printf("[DEBUG] Putting EMR cluster Auto Termination Policy") + + _, errModify := conn.PutAutoTerminationPolicy(&emr.PutAutoTerminationPolicyInput{ + AutoTerminationPolicy: expandAutoTerminationPolicy(n.([]interface{})), + ClusterId: aws.String(d.Id()), + }) + if errModify != nil { + log.Printf("[ERROR] %s", errModify) + return errModify + } + } else { + log.Printf("[DEBUG] Removing EMR cluster Auto Termination Policy") + + _, errModify := conn.RemoveAutoTerminationPolicy(&emr.RemoveAutoTerminationPolicyInput{ + ClusterId: aws.String(d.Id()), + }) + if errModify != nil { + log.Printf("[ERROR] %s", errModify) + return errModify + } + } + } + if d.HasChange("termination_protection") { _, errModify := conn.SetTerminationProtection(&emr.SetTerminationProtectionInput{ JobFlowIds: []*string{aws.String(d.Id())}, @@ -1216,7 +1266,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { var autoScalingPolicy *emr.AutoScalingPolicy if err := json.Unmarshal([]byte(autoscalingPolicyStr), &autoScalingPolicy); err != nil { - return fmt.Errorf("error parsing core_instance_group Auto Scaling Policy JSON: %s", err) + return fmt.Errorf("error parsing core_instance_group Auto Scaling Policy JSON: %w", err) } input := &emr.PutAutoScalingPolicyInput{ @@ -1441,12 +1491,13 @@ func 
CountRemainingInstances(resp *emr.ListInstancesOutput, emrClusterId string) // before moving on var terminated []string for j, i := range resp.Instances { + instanceId := aws.StringValue(i.Ec2InstanceId) if i.Status != nil { if aws.StringValue(i.Status.State) == emr.InstanceStateTerminated { - terminated = append(terminated, *i.Ec2InstanceId) + terminated = append(terminated, instanceId) } } else { - log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId) + log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, instanceId) } } if len(terminated) == instanceCount { @@ -1472,7 +1523,7 @@ func flattenApplications(apps []*emr.Application) []interface{} { appOut := make([]interface{}, 0, len(apps)) for _, app := range apps { - appOut = append(appOut, *app.Name) + appOut = append(appOut, aws.StringValue(app.Name)) } return appOut } @@ -1482,22 +1533,22 @@ func flattenEc2Attributes(ia *emr.Ec2InstanceAttributes) []map[string]interface{ result := make([]map[string]interface{}, 0) if ia.Ec2KeyName != nil { - attrs["key_name"] = *ia.Ec2KeyName + attrs["key_name"] = aws.StringValue(ia.Ec2KeyName) } if ia.Ec2SubnetId != nil { - attrs["subnet_id"] = *ia.Ec2SubnetId + attrs["subnet_id"] = aws.StringValue(ia.Ec2SubnetId) } if ia.RequestedEc2SubnetIds != nil && len(ia.RequestedEc2SubnetIds) > 0 { attrs["subnet_ids"] = flex.FlattenStringSet(ia.RequestedEc2SubnetIds) } if ia.IamInstanceProfile != nil { - attrs["instance_profile"] = *ia.IamInstanceProfile + attrs["instance_profile"] = aws.StringValue(ia.IamInstanceProfile) } if ia.EmrManagedMasterSecurityGroup != nil { - attrs["emr_managed_master_security_group"] = *ia.EmrManagedMasterSecurityGroup + attrs["emr_managed_master_security_group"] = aws.StringValue(ia.EmrManagedMasterSecurityGroup) } if ia.EmrManagedSlaveSecurityGroup != nil { - attrs["emr_managed_slave_security_group"] = *ia.EmrManagedSlaveSecurityGroup + attrs["emr_managed_slave_security_group"] = aws.StringValue(ia.EmrManagedSlaveSecurityGroup) } if len(ia.AdditionalMasterSecurityGroups) > 0 { @@ -1510,7 +1561,7 @@ func flattenEc2Attributes(ia *emr.Ec2InstanceAttributes) []map[string]interface{ } if ia.ServiceAccessSecurityGroup != nil { - attrs["service_access_security_group"] = *ia.ServiceAccessSecurityGroup + attrs["service_access_security_group"] = aws.StringValue(ia.ServiceAccessSecurityGroup) } result = append(result, attrs) @@ -1529,7 +1580,7 @@ func flattenEmrAutoScalingPolicyDescription(policy *emr.AutoScalingPolicyDescrip // for the statefile. for i, rule := range policy.Rules { for j, dimension := range rule.Trigger.CloudWatchAlarmDefinition.Dimensions { - if *dimension.Key == "JobFlowId" { + if aws.StringValue(dimension.Key) == "JobFlowId" { tmpDimensions := append(policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[:j], policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[j+1:]...) 
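// EMR injects a JobFlowId dimension into every CloudWatch alarm
// definition it returns, so the append above drops just that element
// (preserving the order of the remaining dimensions) before the policy
// is re-serialized; leaving it in would make the JSON stored in state
// never match the user-supplied autoscaling_policy and surface as a
// permanent plan diff.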
policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = tmpDimensions } @@ -1545,13 +1596,13 @@ func flattenEmrAutoScalingPolicyDescription(policy *emr.AutoScalingPolicyDescrip } autoscalingPolicyConstraintsBytes, err := json.Marshal(tmpAutoScalingPolicy.Constraints) if err != nil { - return "", fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Constraints: %s", err) + return "", fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Constraints: %w", err) } autoscalingPolicyConstraintsString := string(autoscalingPolicyConstraintsBytes) autoscalingPolicyRulesBytes, err := json.Marshal(tmpAutoScalingPolicy.Rules) if err != nil { - return "", fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Rules: %s", err) + return "", fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Rules: %w", err) } var rules []map[string]interface{} @@ -1631,7 +1682,7 @@ func flattenEmrKerberosAttributes(d *schema.ResourceData, kerberosAttributes *em m := map[string]interface{}{ "kdc_admin_password": d.Get("kerberos_attributes.0.kdc_admin_password").(string), - "realm": *kerberosAttributes.Realm, + "realm": aws.StringValue(kerberosAttributes.Realm), } if v, ok := d.GetOk("kerberos_attributes.0.ad_domain_join_password"); ok { @@ -1700,13 +1751,13 @@ func flattenEBSConfig(ebsBlockDevices []*emr.EbsBlockDevice) *schema.Set { for _, ebs := range ebsBlockDevices { ebsAttrs := make(map[string]interface{}) if ebs.VolumeSpecification.Iops != nil { - ebsAttrs["iops"] = int(*ebs.VolumeSpecification.Iops) + ebsAttrs["iops"] = int(aws.Int64Value(ebs.VolumeSpecification.Iops)) } if ebs.VolumeSpecification.SizeInGB != nil { - ebsAttrs["size"] = int(*ebs.VolumeSpecification.SizeInGB) + ebsAttrs["size"] = int(aws.Int64Value(ebs.VolumeSpecification.SizeInGB)) } if ebs.VolumeSpecification.VolumeType != nil { - ebsAttrs["type"] = *ebs.VolumeSpecification.VolumeType + ebsAttrs["type"] = aws.StringValue(ebs.VolumeSpecification.VolumeType) } ebsAttrs["volumes_per_instance"] = 1 uniqueEBS[resourceClusterEBSHashConfig(ebsAttrs)] += 1 @@ -1724,8 +1775,8 @@ func flattenBootstrapArguments(actions []*emr.Command) []map[string]interface{} for _, b := range actions { attrs := make(map[string]interface{}) - attrs["name"] = *b.Name - attrs["path"] = *b.ScriptPath + attrs["name"] = aws.StringValue(b.Name) + attrs["path"] = aws.StringValue(b.ScriptPath) attrs["args"] = flex.FlattenStringList(b.Args) result = append(result, attrs) } @@ -1979,7 +2030,7 @@ func resourceClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) r func findMasterGroup(instanceGroups []*emr.InstanceGroup) *emr.InstanceGroup { for _, group := range instanceGroups { - if *group.InstanceGroupType == emr.InstanceRoleTypeMaster { + if aws.StringValue(group.InstanceGroupType) == emr.InstanceRoleTypeMaster { return group } } @@ -1992,8 +2043,8 @@ func resourceClusterEBSHashConfig(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", m["size"].(int))) buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) buf.WriteString(fmt.Sprintf("%d-", m["volumes_per_instance"].(int))) - if v, ok := m["iops"]; ok && v.(int) != 0 { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) + if v, ok := m["iops"].(int); ok && v != 0 { + buf.WriteString(fmt.Sprintf("%d-", v)) } return create.StringHashcode(buf.String()) } @@ -2138,7 +2189,7 @@ func flattenOnDemandSpecification(onDemandSpecification *emr.OnDemandProvisionin m := map[string]interface{}{ // The return value from api is wrong. 
it return "LOWEST_PRICE" instead of "lowest-price" // "allocation_strategy": aws.StringValue(onDemandSpecification.AllocationStrategy), - "allocation_strategy": "lowest-price", + "allocation_strategy": emr.OnDemandProvisioningAllocationStrategyLowestPrice, } return []interface{}{m} } @@ -2157,7 +2208,7 @@ func flattenSpotSpecification(spotSpecification *emr.SpotProvisioningSpecificati if spotSpecification.AllocationStrategy != nil { // The return value from api is wrong. It return "CAPACITY_OPTIMIZED" instead of "capacity-optimized" // m["allocation_strategy"] = aws.StringValue(spotSpecification.AllocationStrategy) - m["allocation_strategy"] = "capacity-optimized" + m["allocation_strategy"] = emr.SpotProvisioningAllocationStrategyCapacityOptimized } return []interface{}{m} @@ -2318,3 +2369,35 @@ func removeNil(data map[string]interface{}) map[string]interface{} { return withoutNil } + +func expandAutoTerminationPolicy(policy []interface{}) *emr.AutoTerminationPolicy { + if len(policy) == 0 || policy[0] == nil { + return nil + } + + m := policy[0].(map[string]interface{}) + app := &emr.AutoTerminationPolicy{} + + if v, ok := m["idle_timeout"].(int); ok && v > 0 { + app.IdleTimeout = aws.Int64(int64(v)) + } + + return app +} + +func flattenAutoTerminationPolicy(atp *emr.AutoTerminationPolicy) []map[string]interface{} { + attrs := map[string]interface{}{} + result := make([]map[string]interface{}, 0) + + if atp == nil { + return result + } + + if atp.IdleTimeout != nil { + attrs["idle_timeout"] = aws.Int64Value(atp.IdleTimeout) + } + + result = append(result, attrs) + + return result +} diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index 227ffb283e0a..4e3040e2f7c6 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -4,7 +4,6 @@ import ( "fmt" "regexp" "testing" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -16,7 +15,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" tfemr "github.com/hashicorp/terraform-provider-aws/internal/service/emr" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccEMRCluster_basic(t *testing.T) { @@ -39,6 +37,7 @@ func TestAccEMRCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "arn"), resource.TestCheckNoResourceAttr(resourceName, "additional_info"), resource.TestCheckResourceAttr(resourceName, "bootstrap_action.#", "0"), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "0"), ), }, { @@ -55,6 +54,62 @@ func TestAccEMRCluster_basic(t *testing.T) { }) } +func TestAccEMRCluster_autoTerminationPolicy(t *testing.T) { + var cluster emr.Cluster + + resourceName := "aws_emr_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, emr.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterAutoTerminationConfig(rName, 10000), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.0.idle_timeout", "10000"), + ), + }, + { + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "cluster_state", // Ignore RUNNING versus WAITING changes + "configurations", + "keep_job_flow_alive_when_no_steps", + }, + }, + { + Config: testAccClusterAutoTerminationConfig(rName, 20000), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.0.idle_timeout", "20000"), + ), + }, + { + Config: testAccClusterEC2AttributesDefaultManagedSecurityGroupsConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "0"), + ), + }, + { + Config: testAccClusterAutoTerminationConfig(rName, 20000), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.0.idle_timeout", "20000"), + ), + }, + }, + }) +} + func TestAccEMRCluster_additionalInfo(t *testing.T) { var cluster emr.Cluster expectedJSON := ` @@ -112,7 +167,8 @@ func TestAccEMRCluster_disappears(t *testing.T) { Config: testAccClusterConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &cluster), - testAccCheckClusterDisappears(&cluster), + acctest.CheckResourceDisappears(acctest.Provider, tfemr.ResourceCluster(), resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfemr.ResourceCluster(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -1528,63 +1584,63 @@ func testAccCheckClusterExists(n string, v *emr.Cluster) resource.TestCheckFunc } } -func testAccCheckClusterDisappears(cluster *emr.Cluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRConn - id := aws.StringValue(cluster.Id) +// func testAccCheckClusterDisappears(cluster *emr.Cluster) resource.TestCheckFunc { +// return func(s *terraform.State) error { +// conn := acctest.Provider.Meta().(*conns.AWSClient).EMRConn +// id := aws.StringValue(cluster.Id) - terminateJobFlowsInput := &emr.TerminateJobFlowsInput{ - JobFlowIds: []*string{cluster.Id}, - } +// terminateJobFlowsInput := &emr.TerminateJobFlowsInput{ +// JobFlowIds: []*string{cluster.Id}, +// } - _, err := conn.TerminateJobFlows(terminateJobFlowsInput) +// _, err := conn.TerminateJobFlows(terminateJobFlowsInput) - if err != nil { - return err - } +// if err != nil { +// return err +// } - input := &emr.ListInstancesInput{ - ClusterId: cluster.Id, - } - var output *emr.ListInstancesOutput - var instanceCount int +// input := &emr.ListInstancesInput{ +// ClusterId: cluster.Id, +// } +// var output *emr.ListInstancesOutput +// var instanceCount int - err = resource.Retry(20*time.Minute, func() *resource.RetryError { - var err error - output, err = conn.ListInstances(input) +// err = resource.Retry(20*time.Minute, func() *resource.RetryError { +// var err error +// output, err = conn.ListInstances(input) - if err != nil { - return resource.NonRetryableError(err) - } +// if err != nil { +// return resource.NonRetryableError(err) +// } - instanceCount = tfemr.CountRemainingInstances(output, id) +// instanceCount = tfemr.CountRemainingInstances(output, id) - if instanceCount != 0 { - return 
resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", id, instanceCount)) - } +// if instanceCount != 0 { +// return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", id, instanceCount)) +// } - return nil - }) +// return nil +// }) - if tfresource.TimedOut(err) { - output, err = conn.ListInstances(input) +// if tfresource.TimedOut(err) { +// output, err = conn.ListInstances(input) - if err == nil { - instanceCount = tfemr.CountRemainingInstances(output, id) - } - } +// if err == nil { +// instanceCount = tfemr.CountRemainingInstances(output, id) +// } +// } - if instanceCount != 0 { - return fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", id, instanceCount) - } +// if instanceCount != 0 { +// return fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", id, instanceCount) +// } - if err != nil { - return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain: %w", id, err) - } +// if err != nil { +// return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain: %w", id, err) +// } - return nil - } -} +// return nil +// } +// } func testAccCheckClusterNotRecreated(i, j *emr.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -2535,7 +2591,7 @@ resource "aws_emr_cluster" "test" { applications = ["Spark"] keep_job_flow_alive_when_no_steps = true name = %[1]q - release_label = "emr-5.28.0" + release_label = "emr-5.33.1" service_role = "EMR_DefaultRole" ec2_attributes { @@ -3835,3 +3891,34 @@ resource "aws_emr_cluster" "test" { } `, rName)) } + +func testAccClusterAutoTerminationConfig(rName string, timeout int) string { + return acctest.ConfigCompose( + testAccClusterBaseVPCConfig(rName, false), + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_emr_cluster" "test" { + auto_termination_policy { + idle_timeout = %[2]d + } + + applications = ["Spark"] + keep_job_flow_alive_when_no_steps = true + name = %[1]q + release_label = "emr-5.33.1" + service_role = "EMR_DefaultRole" + + ec2_attributes { + instance_profile = "EMR_EC2_DefaultRole" + subnet_id = aws_subnet.test.id + } + + master_instance_group { + instance_type = "m4.large" + } + + depends_on = [aws_route_table_association.test] +} +`, rName, timeout)) +} diff --git a/internal/service/emr/instance_group_test.go b/internal/service/emr/instance_group_test.go index e8054c6453c1..8e87490b4d80 100644 --- a/internal/service/emr/instance_group_test.go +++ b/internal/service/emr/instance_group_test.go @@ -236,7 +236,7 @@ func TestAccEMRInstanceGroup_Disappears_emrCluster(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(emrClusterResourceName, &cluster), testAccCheckInstanceGroupExists(resourceName, &ig), - testAccCheckClusterDisappears(&cluster), + acctest.CheckResourceDisappears(acctest.Provider, tfemr.ResourceCluster(), emrClusterResourceName), ), ExpectNonEmptyPlan: true, }, From b9723c60315ef81502d3dc8d32394bf1a147f1d8 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Wed, 10 Nov 2021 00:39:53 +0200 Subject: [PATCH 098/304] remove commented out code --- internal/service/emr/cluster_test.go | 58 ---------------------------- 1 file changed, 58 deletions(-) diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index 4e3040e2f7c6..9f94dd443f9d 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -1584,64 +1584,6 @@ func testAccCheckClusterExists(n string, v *emr.Cluster) resource.TestCheckFunc } } -// 
func testAccCheckClusterDisappears(cluster *emr.Cluster) resource.TestCheckFunc { -// return func(s *terraform.State) error { -// conn := acctest.Provider.Meta().(*conns.AWSClient).EMRConn -// id := aws.StringValue(cluster.Id) - -// terminateJobFlowsInput := &emr.TerminateJobFlowsInput{ -// JobFlowIds: []*string{cluster.Id}, -// } - -// _, err := conn.TerminateJobFlows(terminateJobFlowsInput) - -// if err != nil { -// return err -// } - -// input := &emr.ListInstancesInput{ -// ClusterId: cluster.Id, -// } -// var output *emr.ListInstancesOutput -// var instanceCount int - -// err = resource.Retry(20*time.Minute, func() *resource.RetryError { -// var err error -// output, err = conn.ListInstances(input) - -// if err != nil { -// return resource.NonRetryableError(err) -// } - -// instanceCount = tfemr.CountRemainingInstances(output, id) - -// if instanceCount != 0 { -// return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", id, instanceCount)) -// } - -// return nil -// }) - -// if tfresource.TimedOut(err) { -// output, err = conn.ListInstances(input) - -// if err == nil { -// instanceCount = tfemr.CountRemainingInstances(output, id) -// } -// } - -// if instanceCount != 0 { -// return fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", id, instanceCount) -// } - -// if err != nil { -// return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain: %w", id, err) -// } - -// return nil -// } -// } - func testAccCheckClusterNotRecreated(i, j *emr.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.Id) != aws.StringValue(j.Id) { From 418641ed85b9a43a6ed5d0c63658c906f1805a34 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Wed, 10 Nov 2021 11:21:43 +0200 Subject: [PATCH 099/304] refactor cluster to use waiter/finder --- internal/service/emr/cluster.go | 110 +++------------------------ internal/service/emr/cluster_test.go | 10 ++- internal/service/emr/find.go | 44 +++++++++++ internal/service/emr/status.go | 24 ++++++ internal/service/emr/wait.go | 39 ++++++++++ 5 files changed, 125 insertions(+), 102 deletions(-) create mode 100644 internal/service/emr/find.go create mode 100644 internal/service/emr/status.go create mode 100644 internal/service/emr/wait.go diff --git a/internal/service/emr/cluster.go b/internal/service/emr/cluster.go index c3544b20ffbe..8631456b1bb7 100644 --- a/internal/service/emr/cluster.go +++ b/internal/service/emr/cluster.go @@ -11,7 +11,6 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/service/emr" "github.com/hashicorp/aws-sdk-go-base/tfawserr" @@ -970,33 +969,16 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { d.Set("keep_job_flow_alive_when_no_steps", params.Instances.KeepJobFlowAliveWhenNoSteps) log.Println("[INFO] Waiting for EMR Cluster to be available") + cluster, err := waitClusterCreated(conn, d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{ - emr.ClusterStateBootstrapping, - emr.ClusterStateStarting, - }, - Target: []string{ - emr.ClusterStateRunning, - emr.ClusterStateWaiting, - }, - Refresh: resourceClusterStateRefreshFunc(d, meta), - Timeout: 75 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - clusterRaw, err := stateConf.WaitForState() if err != nil { - return fmt.Errorf("Error waiting for EMR Cluster state to be 
\"WAITING\" or \"RUNNING\": %w", err) + return fmt.Errorf("error waiting for EMR Cluster (%s) to be created: %w", d.Id(), err) } // For multiple master nodes, EMR automatically enables // termination protection and ignores the configuration at launch. // This additional handling is to potentially disable termination // protection to match the desired Terraform configuration. - cluster := clusterRaw.(*emr.Cluster) - if aws.BoolValue(cluster.TerminationProtected) != terminationProtection { input := &emr.SetTerminationProtectionInput{ JobFlowIds: []*string{aws.String(d.Id())}, @@ -1016,49 +998,21 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - req := &emr.DescribeClusterInput{ - ClusterId: aws.String(d.Id()), - } + cluster, err := FindClusterByID(conn, d.Id()) - resp, err := conn.DescribeCluster(req) - if err != nil { - // After a Cluster has been terminated for an indeterminate period of time, - // the EMR API will return this type of error: - // InvalidRequestException: Cluster id 'j-XXX' is not valid. - // If this causes issues with masking other legitimate request errors, the - // handling should be updated for deeper inspection of the special error type - // which includes an accurate error code: - // ErrorCode: "NoSuchCluster", - if tfawserr.ErrMessageContains(err, emr.ErrCodeInvalidRequestException, "is not valid") { - log.Printf("[DEBUG] EMR Cluster (%s) not found", d.Id()) - d.SetId("") - return nil - } - return fmt.Errorf("Error reading EMR cluster: %w", err) - } - - if resp.Cluster == nil { - log.Printf("[DEBUG] EMR Cluster (%s) not found", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] EMR Cluster (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - cluster := resp.Cluster - - if cluster.Status != nil { - state := aws.StringValue(cluster.Status.State) - - if state == emr.ClusterStateTerminated || state == emr.ClusterStateTerminatedWithErrors { - log.Printf("[WARN] EMR Cluster (%s) was %s already, removing from state", d.Id(), state) - d.SetId("") - return nil - } - - d.Set("cluster_state", state) - - d.Set("arn", cluster.ClusterArn) + if err != nil { + return fmt.Errorf("error reading EMR Cluster (%s): %w", d.Id(), err) } + d.Set("cluster_state", cluster.Status.State) + d.Set("arn", cluster.ClusterArn) + instanceGroups, err := fetchAllEMRInstanceGroups(conn, d.Id()) if err == nil { // find instance group @@ -1984,50 +1938,6 @@ func readBodyJson(body string, target interface{}) error { return nil } -func resourceClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*conns.AWSClient).EMRConn - - log.Printf("[INFO] Reading EMR Cluster Information: %s", d.Id()) - params := &emr.DescribeClusterInput{ - ClusterId: aws.String(d.Id()), - } - - resp, err := conn.DescribeCluster(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ClusterNotFound" { - return 42, "destroyed", nil - } - } - log.Printf("[WARN] Error on retrieving EMR Cluster (%s) when waiting: %s", d.Id(), err) - return nil, "", err - } - - if resp.Cluster == nil { - return 42, "destroyed", nil - } - - if resp.Cluster.Status == nil { - return resp.Cluster, "", fmt.Errorf("cluster status not provided") - } - - state := aws.StringValue(resp.Cluster.Status.State) - 
log.Printf("[DEBUG] EMR Cluster status (%s): %s", d.Id(), state) - - if state == emr.ClusterStateTerminating || state == emr.ClusterStateTerminatedWithErrors { - reason := resp.Cluster.Status.StateChangeReason - if reason == nil { - return resp.Cluster, state, fmt.Errorf("%s: reason code and message not provided", state) - } - return resp.Cluster, state, fmt.Errorf("%s: %s: %s", state, aws.StringValue(reason.Code), aws.StringValue(reason.Message)) - } - - return resp.Cluster, state, nil - } -} - func findMasterGroup(instanceGroups []*emr.InstanceGroup) *emr.InstanceGroup { for _, group := range instanceGroups { if aws.StringValue(group.InstanceGroupType) == emr.InstanceRoleTypeMaster { diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index 9f94dd443f9d..e30ec14144b9 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -32,9 +32,15 @@ func TestAccEMRCluster_basic(t *testing.T) { Config: testAccClusterConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "elasticmapreduce", regexp.MustCompile("cluster/.+$")), + resource.TestCheckResourceAttr(resourceName, "release_label", "emr-4.6.0"), + resource.TestCheckResourceAttr(resourceName, "applications.#", "1"), resource.TestCheckResourceAttr(resourceName, "scale_down_behavior", "TERMINATE_AT_TASK_COMPLETION"), resource.TestCheckResourceAttr(resourceName, "step.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "ebs_root_volume_size", "21"), + resource.TestCheckResourceAttrPair(resourceName, "autoscaling_role", "aws_iam_role.emr_service", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "service_role", "aws_iam_role.emr_autoscaling_role", "arn"), resource.TestCheckNoResourceAttr(resourceName, "additional_info"), resource.TestCheckResourceAttr(resourceName, "bootstrap_action.#", "0"), resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "0"), @@ -3860,7 +3866,7 @@ resource "aws_emr_cluster" "test" { instance_type = "m4.large" } - depends_on = [aws_route_table_association.test] + depends_on = [aws_route_table_association.test] } `, rName, timeout)) } diff --git a/internal/service/emr/find.go b/internal/service/emr/find.go new file mode 100644 index 000000000000..1edae0fc4ada --- /dev/null +++ b/internal/service/emr/find.go @@ -0,0 +1,44 @@ +package emr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func FindClusterByID(conn *emr.EMR, id string) (*emr.Cluster, error) { + input := &emr.DescribeClusterInput{ + ClusterId: aws.String(id), + } + + output, err := conn.DescribeCluster(input) + + if tfawserr.ErrCodeEquals(err, "ClusterNotFound") || tfawserr.ErrMessageContains(err, emr.ErrCodeInvalidRequestException, "is not valid") { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Cluster == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + status := output.Cluster.Status + state := aws.StringValue(status.State) + + if state == emr.ClusterStateTerminated || state 
== emr.ClusterStateTerminatedWithErrors { + return nil, &resource.NotFoundError{ + Message: aws.StringValue(status.StateChangeReason.Message), + LastRequest: input, + } + } + + return output.Cluster, nil +} diff --git a/internal/service/emr/status.go b/internal/service/emr/status.go new file mode 100644 index 000000000000..9bd3658d0424 --- /dev/null +++ b/internal/service/emr/status.go @@ -0,0 +1,24 @@ +package emr + +import ( + "github.com/aws/aws-sdk-go/aws" + emr "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func statusCluster(conn *emr.EMR, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindClusterByID(conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status.State), nil + } +} diff --git a/internal/service/emr/wait.go b/internal/service/emr/wait.go new file mode 100644 index 000000000000..0909f0e195cf --- /dev/null +++ b/internal/service/emr/wait.go @@ -0,0 +1,39 @@ +package emr + +import ( + "time" + + emr "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + ClusterCreatedTimeout = 75 * time.Minute + ClusterCreatedMinTimeout = 10 * time.Second + ClusterCreatedDelay = 30 * time.Second +) + +func waitClusterCreated(conn *emr.EMR, id string) (*emr.Cluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + emr.ClusterStateBootstrapping, + emr.ClusterStateStarting, + }, + Target: []string{ + emr.ClusterStateRunning, + emr.ClusterStateWaiting, + }, + Refresh: statusCluster(conn, id), + Timeout: ClusterCreatedTimeout, + MinTimeout: ClusterCreatedMinTimeout, + Delay: ClusterCreatedDelay, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*emr.Cluster); ok { + return output, err + } + + return nil, err +} From 894b4f1fe8ff96e3455bfcb948798289aab2fbb4 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Wed, 10 Nov 2021 13:14:43 +0200 Subject: [PATCH 100/304] tests --- internal/service/emr/cluster_test.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index e30ec14144b9..6282f89b7dca 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -36,14 +36,26 @@ func TestAccEMRCluster_basic(t *testing.T) { acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "elasticmapreduce", regexp.MustCompile("cluster/.+$")), resource.TestCheckResourceAttr(resourceName, "release_label", "emr-4.6.0"), resource.TestCheckResourceAttr(resourceName, "applications.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "applications.*", "Spark"), + resource.TestCheckResourceAttr(resourceName, "master_instance_group.#", "1"), + resource.TestCheckResourceAttr(resourceName, "master_instance_group.0.instance_type", "c4.large"), + resource.TestCheckResourceAttr(resourceName, "core_instance_group.#", "1"), + resource.TestCheckResourceAttr(resourceName, "core_instance_group.0.instance_type", "c4.large"), + resource.TestCheckResourceAttr(resourceName, "core_instance_group.0.instance_count", "1"), + resource.TestCheckResourceAttr(resourceName, "ec2_attributes.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "ec2_attributes.0.subnet_id", 
"aws_subnet.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "ec2_attributes.0.emr_managed_master_security_group", "aws_security_group.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "ec2_attributes.0.emr_managed_slave_security_group", "aws_security_group.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "ec2_attributes.0.instance_profile", "aws_iam_instance_profile.emr_instance_profile", "arn"), resource.TestCheckResourceAttr(resourceName, "scale_down_behavior", "TERMINATE_AT_TASK_COMPLETION"), - resource.TestCheckResourceAttr(resourceName, "step.#", "0"), resource.TestCheckResourceAttr(resourceName, "ebs_root_volume_size", "21"), - resource.TestCheckResourceAttrPair(resourceName, "autoscaling_role", "aws_iam_role.emr_service", "arn"), - resource.TestCheckResourceAttrPair(resourceName, "service_role", "aws_iam_role.emr_autoscaling_role", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "service_role", "aws_iam_role.emr_service", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "autoscaling_role", "aws_iam_role.emr_autoscaling_role", "arn"), resource.TestCheckNoResourceAttr(resourceName, "additional_info"), resource.TestCheckResourceAttr(resourceName, "bootstrap_action.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kerberos_attributes.#", "0"), resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "step.#", "0"), ), }, { From 62aec6a76768ecd594bcafde304ee0ed48a87996 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Wed, 10 Nov 2021 13:40:14 +0200 Subject: [PATCH 101/304] changelog --- .changelog/21702.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21702.txt diff --git a/.changelog/21702.txt b/.changelog/21702.txt new file mode 100644 index 000000000000..5d88af078c39 --- /dev/null +++ b/.changelog/21702.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_emr_cluster: Add support for `auto_termination_policy`. +``` \ No newline at end of file From 1c4c7e4311aedb154d533e897144faaa5270b971 Mon Sep 17 00:00:00 2001 From: Ashish Date: Wed, 10 Nov 2021 08:25:49 -0800 Subject: [PATCH 102/304] Add filter documentation Source: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-transit-gateways.html --- website/docs/d/ec2_transit_gateway.html.markdown | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/website/docs/d/ec2_transit_gateway.html.markdown b/website/docs/d/ec2_transit_gateway.html.markdown index 72b19e5bdcea..44b106428d8a 100644 --- a/website/docs/d/ec2_transit_gateway.html.markdown +++ b/website/docs/d/ec2_transit_gateway.html.markdown @@ -43,6 +43,22 @@ The following arguments are supported: * `name` - (Required) Name of the filter. * `values` - (Required) List of one or more values for the filter. +#### Supported values for Filters + +One or more filters are supported. The possible values are: + +* `options.propagation-default-route-table-id` - The ID of the default propagation route table. +* `options.amazon-side-asn` - The private ASN for the Amazon side of a BGP session. +* `options.association-default-route-table-id` - The ID of the default association route table. +* `options.auto-accept-shared-attachments` - Indicates whether there is automatic acceptance of attachment requests (enable | disable ). 
+* `options.default-route-table-association` - Indicates whether resource attachments are automatically associated with the default association route table (enable | disable ). +* `options.default-route-table-propagation` - Indicates whether resource attachments automatically propagate routes to the default propagation route table (enable | disable ). +* `options.dns-support` - Indicates whether DNS support is enabled (enable | disable ). +* `options.vpn-ecmp-support` - Indicates whether Equal Cost Multipath Protocol support is enabled (enable | disable ). +* `owner-id` - The ID of the Amazon Web Services account that owns the transit gateway. +* `state` - The state of the transit gateway (available | deleted | deleting | modifying | pending ). +* `transit-gateway-id` - The ID of the transit gateway + ## Attribute Reference In addition to all arguments above, the following attributes are exported: From ffe9b3c0939c94ac5535e21f24ae067f624c5ec8 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 10 Nov 2021 15:46:49 -0800 Subject: [PATCH 103/304] Uses `ConfigVpcWithSubnets()` This function is in a draft PR, so pulling it in early --- internal/acctest/acctest.go | 19 +++++++++++++ .../appstream/directory_config_test.go | 27 ++++++------------- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index eb1f6cda9f1f..43028a1075eb 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -2473,6 +2473,25 @@ resource "aws_security_group" "sg_for_lambda" { `, policyName, roleName, sgName) } +func ConfigVpcWithSubnets(subnetCount int) string { + return ConfigCompose( + ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = %[1]d + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} +`, subnetCount), + ) +} + func CheckVPCExists(n string, vpc *ec2.Vpc) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index 9c60e984a8ac..be695a2a8b5f 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -146,17 +146,16 @@ func testAccCheckDirectoryConfigDestroy(s *terraform.State) error { func testAccDirectoryConfigConfig(name, userName, password string) string { return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), + acctest.ConfigVpcWithSubnets(2), fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" -} +resource "aws_appstream_directory_config" "test" { + directory_name = %[1]q + organizational_unit_distinguished_names = [aws_directory_service_directory.test.id] -resource "aws_subnet" "test" { - count = 2 - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = "10.0.${count.index}.0/24" - vpc_id = aws_vpc.test.id + service_account_credentials { + account_name = %[2]q + account_password = %[3]q + } } resource "aws_directory_service_directory" "test" { @@ -170,15 +169,5 @@ resource "aws_directory_service_directory" "test" { subnet_ids = aws_subnet.test[*].id } } - -resource "aws_appstream_directory_config" "test" { - directory_name = %[1]q - organizational_unit_distinguished_names = 
[aws_directory_service_directory.test.id]
-
-  service_account_credentials {
-    account_name     = %[2]q
-    account_password = %[3]q
-  }
-}
 `, name, userName, password))
 }

From 4f2f8f59ec5f9e3ad7f8558482c705b4e7955ad2 Mon Sep 17 00:00:00 2001
From: DrFaust92
Date: Thu, 11 Nov 2021 16:21:30 +0200
Subject: [PATCH 104/304] docs

---
 website/docs/r/emr_cluster.html.markdown | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/website/docs/r/emr_cluster.html.markdown b/website/docs/r/emr_cluster.html.markdown
index fe7d07477408..478bcbf2e70b 100644
--- a/website/docs/r/emr_cluster.html.markdown
+++ b/website/docs/r/emr_cluster.html.markdown
@@ -620,6 +620,7 @@ The following arguments are optional:
 * `additional_info` - (Optional) JSON string for selecting additional features such as adding proxy information. Note: Currently there is no API to retrieve the value of this argument after EMR cluster creation from provider, therefore Terraform cannot detect drift from the actual EMR cluster if its value is changed outside Terraform.
 * `applications` - (Optional) List of applications for the cluster. Valid values are: `Flink`, `Hadoop`, `Hive`, `Mahout`, `Pig`, `Spark`, and `JupyterHub` (as of EMR 5.14.0). Case insensitive.
 * `autoscaling_role` - (Optional) IAM role for automatic scaling policies. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
+* `auto_termination_policy` - (Optional) An auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. See [Auto Termination Policy](#auto_termination_policy) below.
 * `bootstrap_action` - (Optional) Ordered list of bootstrap actions that will be run before Hadoop is started on the cluster nodes. See below.
 * `configurations` - (Optional) List of configurations supplied for the EMR cluster you are creating. Supply a configuration object for applications to override their default configuration. See [AWS Documentation](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html) for more information.
 * `configurations_json` - (Optional) JSON string for supplying list of configurations for the EMR cluster.
@@ -668,12 +669,16 @@ EOF
 * `termination_protection` - (Optional) Switch on/off termination protection (default is `false`, except when using multiple master nodes). Before attempting to destroy the resource when termination protection is enabled, this configuration must be applied with its value set to `false`.
 * `visible_to_all_users` - (Optional) Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default value is `true`.
 
+### auto_termination_policy
+
+* `idle_timeout` - (Optional) Specifies the amount of idle time in seconds after which the cluster automatically terminates. You can specify a minimum of `60` seconds and a maximum of `604800` seconds (seven days). See the usage sketch below.
+
 ### bootstrap_action
 
 * `args` - (Optional) List of command line arguments to pass to the bootstrap action script.
 * `name` - (Required) Name of the bootstrap action.
 * `path` - (Required) Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.
 
 ### configurations
 
 A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.
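To make the new block concrete, the sketch below shows how `auto_termination_policy` slots into a cluster configuration. It is modeled on the acceptance-test config in this series; the release label (`emr-5.33.1`), the `EMR_DefaultRole`/`EMR_EC2_DefaultRole` names, and `aws_subnet.example` are illustrative assumptions, not required values.

```terraform
resource "aws_emr_cluster" "example" {
  name                              = "example-cluster"
  release_label                     = "emr-5.33.1" # assumed; any release supporting auto-termination works
  applications                      = ["Spark"]
  service_role                      = "EMR_DefaultRole" # placeholder IAM role
  keep_job_flow_alive_when_no_steps = true

  ec2_attributes {
    instance_profile = "EMR_EC2_DefaultRole" # placeholder instance profile
    subnet_id        = aws_subnet.example.id
  }

  master_instance_group {
    instance_type = "m4.large"
  }

  # Terminate the cluster after one hour (3600 seconds) of idle time;
  # valid values are 60 through 604800 seconds.
  auto_termination_policy {
    idle_timeout = 3600
  }
}
```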
See [Configuring Applications](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html).

From 681e3b2bf0e8c47378e14585edb1441e67a102ef Mon Sep 17 00:00:00 2001
From: DrFaust92
Date: Thu, 11 Nov 2021 18:28:20 +0200
Subject: [PATCH 105/304] fix auto terminate test

---
 internal/service/emr/cluster_test.go | 37 +++++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 3 deletions(-)

diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go
index 6282f89b7dca..c61e0569c888 100644
--- a/internal/service/emr/cluster_test.go
+++ b/internal/service/emr/cluster_test.go
@@ -110,7 +110,7 @@ func TestAccEMRCluster_autoTerminationPolicy(t *testing.T) {
 				),
 			},
 			{
-				Config: testAccClusterEC2AttributesDefaultManagedSecurityGroupsConfig(rName),
+				Config: testAccClusterNoAutoTerminationConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckClusterExists(resourceName, &cluster),
 					resource.TestCheckResourceAttr(resourceName, "auto_termination_policy.#", "0"),
@@ -3870,8 +3870,10 @@ resource "aws_emr_cluster" "test" {
   service_role  = "EMR_DefaultRole"

   ec2_attributes {
-    instance_profile = "EMR_EC2_DefaultRole"
-    subnet_id        = aws_subnet.test.id
+    instance_profile                  = "EMR_EC2_DefaultRole"
+    subnet_id                         = aws_subnet.test.id
+    emr_managed_master_security_group = aws_security_group.test.id
+    emr_managed_slave_security_group  = aws_security_group.test.id
   }

   master_instance_group {
@@ -3882,3 +3884,32 @@ resource "aws_emr_cluster" "test" {
 }
 `, rName, timeout))
 }
+
+func testAccClusterNoAutoTerminationConfig(rName string) string {
+	return acctest.ConfigCompose(
+		testAccClusterBaseVPCConfig(rName, false),
+		fmt.Sprintf(`
+data "aws_partition" "current" {}
+
+resource "aws_emr_cluster" "test" {
+  applications                      = ["Spark"]
+  keep_job_flow_alive_when_no_steps = true
+  name                              = %[1]q
+  release_label                     = "emr-5.33.1"
+  service_role                      = "EMR_DefaultRole"
+
+  ec2_attributes {
+    instance_profile                  = "EMR_EC2_DefaultRole"
+    subnet_id                         = aws_subnet.test.id
+    emr_managed_master_security_group = aws_security_group.test.id
+    emr_managed_slave_security_group  = aws_security_group.test.id
+  }
+
+  master_instance_group {
+    instance_type = "m4.large"
+  }
+
+  depends_on = [aws_route_table_association.test]
+}
+`, rName))
+}

From d62a7199340b42e813914306a3c9c41eb4188d94 Mon Sep 17 00:00:00 2001
From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com>
Date: Thu, 11 Nov 2021 11:48:15 -0600
Subject: [PATCH 106/304] Correct cloudfront_response_headers_policy docs

---
 ...ront_response_headers_policy.html.markdown | 55 ++++++++++---------
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/website/docs/r/cloudfront_response_headers_policy.html.markdown b/website/docs/r/cloudfront_response_headers_policy.html.markdown
index 399bb7203b58..66e32727b9d3 100644
--- a/website/docs/r/cloudfront_response_headers_policy.html.markdown
+++ b/website/docs/r/cloudfront_response_headers_policy.html.markdown
@@ -54,12 +54,12 @@ The following arguments are supported:

 ### Cors Config

-* `access_control_allow_credentials` - (Required) A Boolean value that CloudFront uses as the value for the Access-Control-Allow-Credentials HTTP response header.
-* `access_control_allow_headers` - (Required) Object that contains an attribute `items` that contains a list of HTTP header names that CloudFront includes as values for the Access-Control-Allow-Headers HTTP response header.
-* `access_control_allow_methods` - (Required) Object that contains an attribute `items` that contains a list of HTTP methods that CloudFront includes as values for the Access-Control-Allow-Methods HTTP response header. Valid values: `GET` | `POST` | `OPTIONS` | `PUT` | `DELETE` | `HEAD` | `ALL` -* `access_control_allow_origins` - (Optional) Object that contains an attribute `items` that contains a list of origins that CloudFront can use as the value for the Access-Control-Allow-Origin HTTP response header. -* `access_control_expose_headers` - (Optional) Object that contains an attribute `items` that contains a list of HTTP headers that CloudFront includes as values for the Access-Control-Expose-Headers HTTP response header. -* `access_control_max_age_sec` - (Required) A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. +* `access_control_allow_credentials` - (Required) A Boolean value that CloudFront uses as the value for the `Access-Control-Allow-Credentials` HTTP response header. +* `access_control_allow_headers` - (Required) Object that contains an attribute `items` that contains a list of HTTP header names that CloudFront includes as values for the `Access-Control-Allow-Headers` HTTP response header. +* `access_control_allow_methods` - (Required) Object that contains an attribute `items` that contains a list of HTTP methods that CloudFront includes as values for the `Access-Control-Allow-Methods` HTTP response header. Valid values: `GET` | `POST` | `OPTIONS` | `PUT` | `DELETE` | `HEAD` | `ALL` +* `access_control_allow_origins` - (Optional) Object that contains an attribute `items` that contains a list of origins that CloudFront can use as the value for the `Access-Control-Allow-Origin` HTTP response header. +* `access_control_expose_headers` - (Optional) Object that contains an attribute `items` that contains a list of HTTP headers that CloudFront includes as values for the `Access-Control-Expose-Headers` HTTP response header. +* `access_control_max_age_sec` - (Required) A number that CloudFront uses as the value for the `Access-Control-Max-Age` HTTP response header. ### Custom Header @@ -69,44 +69,45 @@ The following arguments are supported: ### Security Headers Config -* `content_security_policy` - (Optional) The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. See [Content Security Policy](#content_security_policy) for more information. -* `content_type_options` - (Optional) TA setting that determines whether CloudFront includes the X-Content-Type-Options HTTP response header with its value set to nosniff. See [Content Type Options](#content_type_options) for more information. -* `frame_options` - (Optional) TA setting that determines whether CloudFront includes the X-Frame-Options HTTP response header and the header’s value. See [Frame Options](#frame_options) for more information. -* `referrer_policy` - (Optional) TA setting that determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. See [Referrer Policy](#referrer_policy) for more information. -* `xss_protection` - (Optional) TSettings that determine whether CloudFront includes the X-XSS-Protection HTTP response header and the header’s value. See [XSS Protection](#xss_protection) for more information. 
+* `content_security_policy` - (Optional) The policy directives and their values that CloudFront includes as values for the `Content-Security-Policy` HTTP response header. See [Content Security Policy](#content_security_policy) for more information.
+* `content_type_options` - (Optional) Determines whether CloudFront includes the `X-Content-Type-Options` HTTP response header with its value set to `nosniff`. See [Content Type Options](#content_type_options) for more information.
+* `frame_options` - (Optional) Determines whether CloudFront includes the `X-Frame-Options` HTTP response header and the header’s value. See [Frame Options](#frame_options) for more information.
+* `referrer_policy` - (Optional) Determines whether CloudFront includes the `Referrer-Policy` HTTP response header and the header’s value. See [Referrer Policy](#referrer_policy) for more information.
+* `strict_transport_security` - (Optional) Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header’s value. See [Strict Transport Security](#strict_transport_security) for more information.
+* `xss_protection` - (Optional) Determines whether CloudFront includes the `X-XSS-Protection` HTTP response header and the header’s value. See [XSS Protection](#xss_protection) for more information.

 ### Content Security Policy

-* `content_security_policy` - (Required) TThe policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header.
-* `override` - (Required) A Boolean value that determines whether CloudFront overrides the Content-Security-Policy HTTP response header received from the origin with the one specified in this response headers policy.
+* `content_security_policy` - (Required) The policy directives and their values that CloudFront includes as values for the `Content-Security-Policy` HTTP response header.
+* `override` - (Required) A Boolean value that determines whether CloudFront overrides the `Content-Security-Policy` HTTP response header received from the origin with the one specified in this response headers policy.

 ### Content Type Options

-* `override` - (Required) A Boolean value that determines whether CloudFront overrides the X-Content-Type-Options HTTP response header received from the origin with the one specified in this response headers policy.
+* `override` - (Required) A Boolean value that determines whether CloudFront overrides the `X-Content-Type-Options` HTTP response header received from the origin with the one specified in this response headers policy.

 ### Frame Options

-* `frame_option` - (Required) The value of the X-Frame-Options HTTP response header. Valid values: `DENY` | `SAMEORIGIN`
-* `override` - (Required) A Boolean value that determines whether CloudFront overrides the X-Frame-Options HTTP response header received from the origin with the one specified in this response headers policy.
+* `frame_option` - (Required) The value of the `X-Frame-Options` HTTP response header. Valid values: `DENY` | `SAMEORIGIN`
+* `override` - (Required) A Boolean value that determines whether CloudFront overrides the `X-Frame-Options` HTTP response header received from the origin with the one specified in this response headers policy.

 ### Referrer Policy

-* `referrer_policy` - (Required) The value of the Referrer-Policy HTTP response header.
Valid Values: `no-referrer` | `no-referrer-when-downgrade` | `origin` | `origin-when-cross-origin` | `same-origin` | `strict-origin` | `strict-origin-when-cross-origin` | `unsafe-url` -* `override` - (Required) A Boolean value that determines whether CloudFront overrides the Referrer-Policy HTTP response header received from the origin with the one specified in this response headers policy. +* `referrer_policy` - (Required) The value of the `Referrer-Policy` HTTP response header. Valid Values: `no-referrer` | `no-referrer-when-downgrade` | `origin` | `origin-when-cross-origin` | `same-origin` | `strict-origin` | `strict-origin-when-cross-origin` | `unsafe-url` +* `override` - (Required) A Boolean value that determines whether CloudFront overrides the `Referrer-Policy` HTTP response header received from the origin with the one specified in this response headers policy. ### Strict Transport Security -* `access_control_max_age_sec` - (Required) A number that CloudFront uses as the value for the max-age directive in the Strict-Transport-Security HTTP response header. -* `include_subdomains` - (Optional) A Boolean value that determines whether CloudFront includes the includeSubDomains directive in the Strict-Transport-Security HTTP response header. -* `override` - (Required) A Boolean value that determines whether CloudFront overrides the Strict-Transport-Security HTTP response header received from the origin with the one specified in this response headers policy. -* `preload` - (Optional) A Boolean value that determines whether CloudFront includes the preload directive in the Strict-Transport-Security HTTP response header. +* `access_control_max_age_sec` - (Required) A number that CloudFront uses as the value for the `max-age` directive in the `Strict-Transport-Security` HTTP response header. +* `include_subdomains` - (Optional) A Boolean value that determines whether CloudFront includes the `includeSubDomains` directive in the `Strict-Transport-Security` HTTP response header. +* `override` - (Required) A Boolean value that determines whether CloudFront overrides the `Strict-Transport-Security` HTTP response header received from the origin with the one specified in this response headers policy. +* `preload` - (Optional) A Boolean value that determines whether CloudFront includes the `preload` directive in the `Strict-Transport-Security` HTTP response header. ### XSS Protection -* `mode_block` - (Required) A Boolean value that determines whether CloudFront includes the mode=block directive in the X-XSS-Protection header. -* `override` - (Required) A Boolean value that determines whether CloudFront overrides the X-XSS-Protection HTTP response header received from the origin with the one specified in this response headers policy. -* `protection` - (Required) A Boolean value that determines the value of the X-XSS-Protection HTTP response header. When this setting is true, the value of the X-XSS-Protection header is 1. When this setting is false, the value of the X-XSS-Protection header is 0. -* `report_uri` - (Optional) A Boolean value that determines whether CloudFront sets a reporting URI in the X-XSS-Protection header. +* `mode_block` - (Required) A Boolean value that determines whether CloudFront includes the `mode=block` directive in the `X-XSS-Protection` header. +* `override` - (Required) A Boolean value that determines whether CloudFront overrides the `X-XSS-Protection` HTTP response header received from the origin with the one specified in this response headers policy. 
+* `protection` - (Required) A Boolean value that determines the value of the `X-XSS-Protection` HTTP response header. When this setting is `true`, the value of the `X-XSS-Protection` header is `1`. When this setting is `false`, the value of the `X-XSS-Protection` header is `0`. +* `report_uri` - (Optional) A reporting URI, which CloudFront uses as the value of the report directive in the `X-XSS-Protection` header. You cannot specify a `report_uri` when `mode_block` is `true`. ## Attributes Reference @@ -121,4 +122,4 @@ Cloudfront Response Headers Policies can be imported using the `id`, e.g. ``` $ terraform import aws_cloudfront_response_headers_policy.policy 658327ea-f89d-4fab-a63d-7e88639e58f9 -``` \ No newline at end of file +``` From 50f71841a58047bd24f333cfff6a2911080917ed Mon Sep 17 00:00:00 2001 From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Thu, 11 Nov 2021 12:09:51 -0600 Subject: [PATCH 107/304] Complete sentence --- website/docs/r/cloudfront_response_headers_policy.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/cloudfront_response_headers_policy.html.markdown b/website/docs/r/cloudfront_response_headers_policy.html.markdown index 66e32727b9d3..fcff2e7a9e96 100644 --- a/website/docs/r/cloudfront_response_headers_policy.html.markdown +++ b/website/docs/r/cloudfront_response_headers_policy.html.markdown @@ -49,7 +49,7 @@ The following arguments are supported: * `name` - (Required) A unique name to identify the response headers policy. * `comment` - (Optional) A comment to describe the response headers policy. The comment cannot be longer than 128 characters. * `cors_config` - (Optional) A configuration for a set of HTTP response headers that are used for Cross-Origin Resource Sharing (CORS). See [Cors Config](#cors_config) for more information. -* `custom_headers_config` - (Optional) Object that contains an attribute `items` that contains a list of Custom Headers See [Custom Header](#custom_header) for more information. +* `custom_headers_config` - (Optional) Object that contains an attribute `items` that contains a list of custom headers. See [Custom Header](#custom_header) for more information. * `security_headers_config` - (Optional) A configuration for a set of security-related HTTP response headers. See [Security Headers Config](#security_headers_config) for more information. 
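To complement the corrected argument list, here is a hypothetical policy that exercises only `security_headers_config`. The policy name and the concrete header values (one-year HSTS max age, `DENY` framing, `same-origin` referrers) are illustrative choices, not requirements.

```terraform
resource "aws_cloudfront_response_headers_policy" "security_example" {
  name = "security-headers-policy" # illustrative name

  security_headers_config {
    content_type_options {
      override = true # emit X-Content-Type-Options: nosniff
    }

    frame_options {
      frame_option = "DENY"
      override     = true
    }

    referrer_policy {
      referrer_policy = "same-origin"
      override        = true
    }

    strict_transport_security {
      access_control_max_age_sec = 31536000 # one year
      include_subdomains         = true
      override                   = true
      preload                    = true
    }

    xss_protection {
      mode_block = true # note: report_uri cannot be set when mode_block is true
      override   = true
      protection = true
    }
  }
}
```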
### Cors Config From 87d65266b2d3af00bf0abcffbb0fab0f5d79f11c Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Thu, 11 Nov 2021 22:11:33 +0200 Subject: [PATCH 108/304] fmt --- internal/service/emr/cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index c61e0569c888..625cc9b48115 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -3901,7 +3901,7 @@ resource "aws_emr_cluster" "test" { ec2_attributes { instance_profile = "EMR_EC2_DefaultRole" subnet_id = aws_subnet.test.id - emr_managed_master_security_group = aws_security_group.test.id + emr_managed_master_security_group = aws_security_group.test.id emr_managed_slave_security_group = aws_security_group.test.id } From 8783f7b8bac91f97664f41b9d69d2796cbf8d976 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Thu, 11 Nov 2021 23:03:17 +0200 Subject: [PATCH 109/304] fmt --- internal/service/emr/cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index 625cc9b48115..a0de3c1857cd 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -3872,7 +3872,7 @@ resource "aws_emr_cluster" "test" { ec2_attributes { instance_profile = "EMR_EC2_DefaultRole" subnet_id = aws_subnet.test.id - emr_managed_master_security_group = aws_security_group.test.id + emr_managed_master_security_group = aws_security_group.test.id emr_managed_slave_security_group = aws_security_group.test.id } From 566590b67e5956deb2fc04180ea9118b8faf808f Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 11 Nov 2021 16:55:06 -0500 Subject: [PATCH 110/304] add pagination to security group finder method --- internal/service/ec2/find.go | 50 ++++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 10 deletions(-) diff --git a/internal/service/ec2/find.go b/internal/service/ec2/find.go index e2800ee55bd1..7b6e50ed2fdd 100644 --- a/internal/service/ec2/find.go +++ b/internal/service/ec2/find.go @@ -553,7 +553,44 @@ func FindSecurityGroupByNameAndVPCID(conn *ec2.EC2, name, vpcID string) (*ec2.Se // FindSecurityGroup looks up a security group using an ec2.DescribeSecurityGroupsInput. Returns a resource.NotFoundError if not found. func FindSecurityGroup(conn *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) (*ec2.SecurityGroup, error) { - result, err := conn.DescribeSecurityGroups(input) + output, err := FindSecurityGroups(conn, input) + + if err != nil { + return nil, err + } + + if len(output) == 0 || output[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + if count := len(output); count > 1 { + return nil, tfresource.NewTooManyResultsError(count, input) + } + + return output[0], nil +} + +// FindSecurityGroups returns an array of security groups that match an ec2.DescribeSecurityGroupsInput. +// Returns a resource.NotFoundError if no group is found for a specified SecurityGroup or SecurityGroupId. 
+func FindSecurityGroups(conn *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) { + var output []*ec2.SecurityGroup + + err := conn.DescribeSecurityGroupsPages(input, func(page *ec2.DescribeSecurityGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, sg := range page.SecurityGroups { + if sg == nil { + continue + } + + output = append(output, sg) + } + + return !lastPage + }) + if tfawserr.ErrCodeEquals(err, InvalidSecurityGroupIDNotFound) || tfawserr.ErrCodeEquals(err, InvalidGroupNotFound) { return nil, &resource.NotFoundError{ @@ -561,19 +598,12 @@ func FindSecurityGroup(conn *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) (* LastRequest: input, } } + if err != nil { return nil, err } - if result == nil || len(result.SecurityGroups) == 0 || result.SecurityGroups[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if len(result.SecurityGroups) > 1 { - return nil, tfresource.NewTooManyResultsError(len(result.SecurityGroups), input) - } - - return result.SecurityGroups[0], nil + return output, nil } // FindSpotInstanceRequestByID looks up a SpotInstanceRequest by ID. When not found, returns nil and potentially an API error. From 006becab8aa355f1da6426d24c20f6a61e14a0e8 Mon Sep 17 00:00:00 2001 From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Thu, 11 Nov 2021 16:53:29 -0600 Subject: [PATCH 111/304] Add custom_headers_config example --- ...ront_response_headers_policy.html.markdown | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/website/docs/r/cloudfront_response_headers_policy.html.markdown b/website/docs/r/cloudfront_response_headers_policy.html.markdown index fcff2e7a9e96..7ebce8f89e00 100644 --- a/website/docs/r/cloudfront_response_headers_policy.html.markdown +++ b/website/docs/r/cloudfront_response_headers_policy.html.markdown @@ -15,7 +15,7 @@ When it’s attached to a cache behavior, CloudFront adds the headers in the pol ## Example Usage -The following example below creates a CloudFront response headers policy. +The example below creates a CloudFront response headers policy. ```terraform resource "aws_cloudfront_response_headers_policy" "example" { @@ -42,6 +42,28 @@ resource "aws_cloudfront_response_headers_policy" "example" { } ``` +The example below creates a CloudFront response headers policy with a custom headers config. 
+ +```terraform +resource "aws_cloudfront_response_headers_policy" "example" { + name = "example-headers-policy" + + custom_headers_config { + items { + header = "X-Permitted-Cross-Domain-Policies" + override = true + value = "none" + } + + items { + header = "X-Test" + override = true + value = "none" + } + } +} +``` + ## Argument Reference The following arguments are supported: From 9fb4ffa5e18d96a06bb16be510960db0e36b6f2c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Nov 2021 16:55:55 -0500 Subject: [PATCH 112/304] lexmodels: Fix labeler --- .github/labeler-issue-triage.yml | 2 +- .github/labeler-pr-triage.yml | 4 ++-- infrastructure/repository/labels-service.tf | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml index 905054d75abc..35753a544ab5 100644 --- a/.github/labeler-issue-triage.yml +++ b/.github/labeler-issue-triage.yml @@ -222,7 +222,7 @@ service/lakeformation: - '((\*|-) ?`?|(data|resource) "?)aws_lakeformation_' service/lambda: - '((\*|-) ?`?|(data|resource) "?)aws_lambda_' -service/lexmodelbuildingservice: +service/lexmodels: - '((\*|-) ?`?|(data|resource) "?)aws_lex_' service/licensemanager: - '((\*|-) ?`?|(data|resource) "?)aws_licensemanager_' diff --git a/.github/labeler-pr-triage.yml b/.github/labeler-pr-triage.yml index 0e16554d8087..4c55b45a9915 100644 --- a/.github/labeler-pr-triage.yml +++ b/.github/labeler-pr-triage.yml @@ -383,8 +383,8 @@ service/lakeformation: service/lambda: - 'internal/service/lambda/**/*' - 'website/**/lambda_*' -service/lexmodelbuildingservice: - - 'internal/service/lexmodelbuilding/**/*' +service/lexmodels: + - 'internal/service/lexmodels/**/*' - 'website/**/lex_*' service/licensemanager: - 'internal/service/licensemanager/**/*' diff --git a/infrastructure/repository/labels-service.tf b/infrastructure/repository/labels-service.tf index 8466558fe4eb..71282a3a3609 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -124,7 +124,7 @@ variable "service_labels" { "kms", "lakeformation", "lambda", - "lexmodelbuildingservice", + "lexmodels", "licensemanager", "lightsail", "location", From 7e16ee3ab46b2430392f4335091e22ba58113030 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Nov 2021 17:04:33 -0500 Subject: [PATCH 113/304] lexmodels: Update conns --- internal/conns/conns.go | 8 ++++---- internal/service/docdb/sweep.go | 1 + internal/service/lexmodelbuilding/bot.go | 8 ++++---- internal/service/lexmodelbuilding/bot_alias.go | 8 ++++---- .../service/lexmodelbuilding/bot_alias_data_source.go | 2 +- internal/service/lexmodelbuilding/bot_alias_test.go | 4 ++-- internal/service/lexmodelbuilding/bot_data_source.go | 2 +- internal/service/lexmodelbuilding/bot_test.go | 6 +++--- internal/service/lexmodelbuilding/intent.go | 8 ++++---- internal/service/lexmodelbuilding/intent_data_source.go | 2 +- internal/service/lexmodelbuilding/intent_test.go | 8 ++++---- internal/service/lexmodelbuilding/slot_type.go | 8 ++++---- .../service/lexmodelbuilding/slot_type_data_source.go | 2 +- internal/service/lexmodelbuilding/slot_type_test.go | 6 +++--- internal/service/lexmodelbuilding/sweep.go | 8 ++++---- 15 files changed, 41 insertions(+), 40 deletions(-) diff --git a/internal/conns/conns.go b/internal/conns/conns.go index b565cbb82783..020fa86d89eb 100644 --- a/internal/conns/conns.go +++ b/internal/conns/conns.go @@ -433,7 +433,7 @@ const ( KMS = "kms" LakeFormation = "lakeformation" Lambda = "lambda" - 
LexModelBuilding = "lexmodelbuilding" + LexModels = "lexmodels" LexModelsV2 = "lexmodelsv2" LexRuntime = "lexruntime" LexRuntimeV2 = "lexruntimev2" @@ -717,7 +717,7 @@ func init() { serviceData[KMS] = &ServiceDatum{AWSClientName: "KMS", AWSServiceName: kms.ServiceName, AWSEndpointsID: kms.EndpointsID, AWSServiceID: kms.ServiceID, ProviderNameUpper: "KMS", HCLKeys: []string{"kms"}} serviceData[LakeFormation] = &ServiceDatum{AWSClientName: "LakeFormation", AWSServiceName: lakeformation.ServiceName, AWSEndpointsID: lakeformation.EndpointsID, AWSServiceID: lakeformation.ServiceID, ProviderNameUpper: "LakeFormation", HCLKeys: []string{"lakeformation"}} serviceData[Lambda] = &ServiceDatum{AWSClientName: "Lambda", AWSServiceName: lambda.ServiceName, AWSEndpointsID: lambda.EndpointsID, AWSServiceID: lambda.ServiceID, ProviderNameUpper: "Lambda", HCLKeys: []string{"lambda"}} - serviceData[LexModelBuilding] = &ServiceDatum{AWSClientName: "LexModelBuildingService", AWSServiceName: lexmodelbuildingservice.ServiceName, AWSEndpointsID: lexmodelbuildingservice.EndpointsID, AWSServiceID: lexmodelbuildingservice.ServiceID, ProviderNameUpper: "LexModelBuilding", HCLKeys: []string{"lexmodels", "lexmodelbuilding", "lexmodelbuildingservice"}} + serviceData[LexModels] = &ServiceDatum{AWSClientName: "LexModelBuildingService", AWSServiceName: lexmodelbuildingservice.ServiceName, AWSEndpointsID: lexmodelbuildingservice.EndpointsID, AWSServiceID: lexmodelbuildingservice.ServiceID, ProviderNameUpper: "LexModels", HCLKeys: []string{"lexmodels", "lexmodelbuilding", "lexmodelbuildingservice"}} serviceData[LexModelsV2] = &ServiceDatum{AWSClientName: "LexModelsV2", AWSServiceName: lexmodelsv2.ServiceName, AWSEndpointsID: lexmodelsv2.EndpointsID, AWSServiceID: lexmodelsv2.ServiceID, ProviderNameUpper: "LexModelsV2", HCLKeys: []string{"lexmodelsv2"}} serviceData[LexRuntime] = &ServiceDatum{AWSClientName: "LexRuntimeService", AWSServiceName: lexruntimeservice.ServiceName, AWSEndpointsID: lexruntimeservice.EndpointsID, AWSServiceID: lexruntimeservice.ServiceID, ProviderNameUpper: "LexRuntime", HCLKeys: []string{"lexruntime", "lexruntimeservice"}} serviceData[LexRuntimeV2] = &ServiceDatum{AWSClientName: "LexRuntimeV2", AWSServiceName: lexruntimev2.ServiceName, AWSEndpointsID: lexruntimev2.EndpointsID, AWSServiceID: lexruntimev2.ServiceID, ProviderNameUpper: "LexRuntimeV2", HCLKeys: []string{"lexruntimev2"}} @@ -1029,7 +1029,7 @@ type AWSClient struct { KMSConn *kms.KMS LakeFormationConn *lakeformation.LakeFormation LambdaConn *lambda.Lambda - LexModelBuildingConn *lexmodelbuildingservice.LexModelBuildingService + LexModelsConn *lexmodelbuildingservice.LexModelBuildingService LexModelsV2Conn *lexmodelsv2.LexModelsV2 LexRuntimeConn *lexruntimeservice.LexRuntimeService LexRuntimeV2Conn *lexruntimev2.LexRuntimeV2 @@ -1381,7 +1381,7 @@ func (c *Config) Client() (interface{}, error) { KMSConn: kms.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[KMS])})), LakeFormationConn: lakeformation.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[LakeFormation])})), LambdaConn: lambda.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[Lambda])})), - LexModelBuildingConn: lexmodelbuildingservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[LexModelBuilding])})), + LexModelsConn: lexmodelbuildingservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[LexModels])})), LexModelsV2Conn: lexmodelsv2.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[LexModelsV2])})), LexRuntimeConn: 
lexruntimeservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[LexRuntime])})), LexRuntimeV2Conn: lexruntimev2.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[LexRuntimeV2])})), diff --git a/internal/service/docdb/sweep.go b/internal/service/docdb/sweep.go index f3e74fab42f6..3e8b432f1963 100644 --- a/internal/service/docdb/sweep.go +++ b/internal/service/docdb/sweep.go @@ -4,6 +4,7 @@ package docdb import ( + "context" "fmt" "log" diff --git a/internal/service/lexmodelbuilding/bot.go b/internal/service/lexmodelbuilding/bot.go index 68fca744fcce..086a07f4a89a 100644 --- a/internal/service/lexmodelbuilding/bot.go +++ b/internal/service/lexmodelbuilding/bot.go @@ -217,7 +217,7 @@ var validateLexBotVersion = validation.All( ) func resourceBotCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn name := d.Get("name").(string) input := &lexmodelbuildingservice.PutBotInput{ @@ -273,7 +273,7 @@ func resourceBotCreate(d *schema.ResourceData, meta interface{}) error { } func resourceBotRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn output, err := FindBotVersionByName(conn, d.Id(), BotVersionLatest) @@ -343,7 +343,7 @@ func resourceBotRead(d *schema.ResourceData, meta interface{}) error { } func resourceBotUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.PutBotInput{ Checksum: aws.String(d.Get("checksum").(string)), @@ -388,7 +388,7 @@ func resourceBotUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceBotDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.DeleteBotInput{ Name: aws.String(d.Id()), diff --git a/internal/service/lexmodelbuilding/bot_alias.go b/internal/service/lexmodelbuilding/bot_alias.go index 12e6d3b94d8d..6f9ea1d12245 100644 --- a/internal/service/lexmodelbuilding/bot_alias.go +++ b/internal/service/lexmodelbuilding/bot_alias.go @@ -116,7 +116,7 @@ var validateLexBotAliasName = validation.All( ) func resourceBotAliasCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn botName := d.Get("bot_name").(string) botAliasName := d.Get("name").(string) @@ -169,7 +169,7 @@ func resourceBotAliasCreate(d *schema.ResourceData, meta interface{}) error { } func resourceBotAliasRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn resp, err := conn.GetBotAlias(&lexmodelbuildingservice.GetBotAliasInput{ BotName: aws.String(d.Get("bot_name").(string)), @@ -209,7 +209,7 @@ func resourceBotAliasRead(d *schema.ResourceData, meta interface{}) error { } func resourceBotAliasUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.PutBotAliasInput{ BotName: aws.String(d.Get("bot_name").(string)), @@ -259,7 +259,7 @@ func resourceBotAliasUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceBotAliasDelete(d 
*schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn botName := d.Get("bot_name").(string) botAliasName := d.Get("name").(string) diff --git a/internal/service/lexmodelbuilding/bot_alias_data_source.go b/internal/service/lexmodelbuilding/bot_alias_data_source.go index a2da2f14bfde..becfa8b4f6e1 100644 --- a/internal/service/lexmodelbuilding/bot_alias_data_source.go +++ b/internal/service/lexmodelbuilding/bot_alias_data_source.go @@ -55,7 +55,7 @@ func DataSourceBotAlias() *schema.Resource { } func dataSourceBotAliasRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn botName := d.Get("bot_name").(string) botAliasName := d.Get("name").(string) diff --git a/internal/service/lexmodelbuilding/bot_alias_test.go b/internal/service/lexmodelbuilding/bot_alias_test.go index 8cfaa63e92e7..a5922bbe25c6 100644 --- a/internal/service/lexmodelbuilding/bot_alias_test.go +++ b/internal/service/lexmodelbuilding/bot_alias_test.go @@ -361,7 +361,7 @@ func testAccCheckBotAliasExists(rName string, output *lexmodelbuildingservice.Ge botAliasName := rs.Primary.Attributes["name"] var err error - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn output, err = conn.GetBotAlias(&lexmodelbuildingservice.GetBotAliasInput{ BotName: aws.String(botName), @@ -380,7 +380,7 @@ func testAccCheckBotAliasExists(rName string, output *lexmodelbuildingservice.Ge func testAccCheckBotAliasDestroy(botName, botAliasName string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn _, err := conn.GetBotAlias(&lexmodelbuildingservice.GetBotAliasInput{ BotName: aws.String(botName), diff --git a/internal/service/lexmodelbuilding/bot_data_source.go b/internal/service/lexmodelbuilding/bot_data_source.go index 57f262cea814..4977ac3ac9aa 100644 --- a/internal/service/lexmodelbuilding/bot_data_source.go +++ b/internal/service/lexmodelbuilding/bot_data_source.go @@ -86,7 +86,7 @@ func DataSourceBot() *schema.Resource { } func dataSourceBotRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn name := d.Get("name").(string) version := d.Get("version").(string) diff --git a/internal/service/lexmodelbuilding/bot_test.go b/internal/service/lexmodelbuilding/bot_test.go index 5be0eb741e4f..ca2832a5bb3a 100644 --- a/internal/service/lexmodelbuilding/bot_test.go +++ b/internal/service/lexmodelbuilding/bot_test.go @@ -737,7 +737,7 @@ func testAccCheckBotExistsWithVersion(rName, botVersion string, v *lexmodelbuild return fmt.Errorf("No Lex Bot ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn output, err := tflexmodelbuilding.FindBotVersionByName(conn, rs.Primary.ID, botVersion) @@ -757,7 +757,7 @@ func testAccCheckBotExists(rName string, output *lexmodelbuildingservice.GetBotO func testAccCheckBotNotExists(botName, botVersion string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := 
acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn _, err := tflexmodelbuilding.FindBotVersionByName(conn, botName, botVersion) @@ -774,7 +774,7 @@ func testAccCheckBotNotExists(botName, botVersion string) resource.TestCheckFunc } func testAccCheckBotDestroy(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lex_bot" { diff --git a/internal/service/lexmodelbuilding/intent.go b/internal/service/lexmodelbuilding/intent.go index b8d7300619f2..bfeb26f88b56 100644 --- a/internal/service/lexmodelbuilding/intent.go +++ b/internal/service/lexmodelbuilding/intent.go @@ -282,7 +282,7 @@ func hasIntentConfigChanges(d verify.ResourceDiffer) bool { } func resourceIntentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn name := d.Get("name").(string) input := &lexmodelbuildingservice.PutIntentInput{ @@ -355,7 +355,7 @@ func resourceIntentCreate(d *schema.ResourceData, meta interface{}) error { } func resourceIntentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn resp, err := conn.GetIntent(&lexmodelbuildingservice.GetIntentInput{ Name: aws.String(d.Id()), @@ -433,7 +433,7 @@ func resourceIntentRead(d *schema.ResourceData, meta interface{}) error { } func resourceIntentUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.PutIntentInput{ Checksum: aws.String(d.Get("checksum").(string)), @@ -503,7 +503,7 @@ func resourceIntentUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceIntentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.DeleteIntentInput{ Name: aws.String(d.Id()), diff --git a/internal/service/lexmodelbuilding/intent_data_source.go b/internal/service/lexmodelbuilding/intent_data_source.go index 81231ba57707..ad79f2afdc21 100644 --- a/internal/service/lexmodelbuilding/intent_data_source.go +++ b/internal/service/lexmodelbuilding/intent_data_source.go @@ -64,7 +64,7 @@ func DataSourceIntent() *schema.Resource { } func dataSourceIntentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn intentName := d.Get("name").(string) resp, err := conn.GetIntent(&lexmodelbuildingservice.GetIntentInput{ diff --git a/internal/service/lexmodelbuilding/intent_test.go b/internal/service/lexmodelbuilding/intent_test.go index ee70c9155ab5..fb299e8ac77c 100644 --- a/internal/service/lexmodelbuilding/intent_test.go +++ b/internal/service/lexmodelbuilding/intent_test.go @@ -560,7 +560,7 @@ func TestAccLexModelBuildingIntent_updateWithExternalChange(t *testing.T) { testAccCheckAWSLexIntentUpdateDescription := func(provider *schema.Provider, _ *schema.Resource, resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := provider.Meta().(*conns.AWSClient).LexModelsConn resourceState, ok := 
s.RootModule().Resources[resourceName] if !ok { @@ -688,7 +688,7 @@ func testAccCheckIntentExistsWithVersion(rName, intentVersion string, output *le } var err error - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn output, err = conn.GetIntent(&lexmodelbuildingservice.GetIntentInput{ Name: aws.String(rs.Primary.ID), @@ -711,7 +711,7 @@ func testAccCheckIntentExists(rName string, output *lexmodelbuildingservice.GetI func testAccCheckIntentNotExists(intentName, intentVersion string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn _, err := conn.GetIntent(&lexmodelbuildingservice.GetIntentInput{ Name: aws.String(intentName), @@ -729,7 +729,7 @@ func testAccCheckIntentNotExists(intentName, intentVersion string) resource.Test } func testAccCheckIntentDestroy(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lex_intent" { diff --git a/internal/service/lexmodelbuilding/slot_type.go b/internal/service/lexmodelbuilding/slot_type.go index 0cf6b16983e2..cfb575e0650f 100644 --- a/internal/service/lexmodelbuilding/slot_type.go +++ b/internal/service/lexmodelbuilding/slot_type.go @@ -134,7 +134,7 @@ func hasSlotTypeConfigChanges(d verify.ResourceDiffer) bool { } func resourceSlotTypeCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn name := d.Get("name").(string) input := &lexmodelbuildingservice.PutSlotTypeInput{ @@ -170,7 +170,7 @@ func resourceSlotTypeCreate(d *schema.ResourceData, meta interface{}) error { } func resourceSlotTypeRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn output, err := FindSlotTypeVersionByName(conn, d.Id(), SlotTypeVersionLatest) @@ -207,7 +207,7 @@ func resourceSlotTypeRead(d *schema.ResourceData, meta interface{}) error { } func resourceSlotTypeUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.PutSlotTypeInput{ Checksum: aws.String(d.Get("checksum").(string)), @@ -233,7 +233,7 @@ func resourceSlotTypeUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceSlotTypeDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn input := &lexmodelbuildingservice.DeleteSlotTypeInput{ Name: aws.String(d.Id()), diff --git a/internal/service/lexmodelbuilding/slot_type_data_source.go b/internal/service/lexmodelbuilding/slot_type_data_source.go index 7a8402c668a9..e7f69e3a2655 100644 --- a/internal/service/lexmodelbuilding/slot_type_data_source.go +++ b/internal/service/lexmodelbuilding/slot_type_data_source.go @@ -76,7 +76,7 @@ func DataSourceSlotType() *schema.Resource { } func dataSourceSlotTypeRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).LexModelBuildingConn + conn := meta.(*conns.AWSClient).LexModelsConn name := d.Get("name").(string) version := 
d.Get("version").(string) diff --git a/internal/service/lexmodelbuilding/slot_type_test.go b/internal/service/lexmodelbuilding/slot_type_test.go index 4a8fd394534a..983cd2eda577 100644 --- a/internal/service/lexmodelbuilding/slot_type_test.go +++ b/internal/service/lexmodelbuilding/slot_type_test.go @@ -381,7 +381,7 @@ func testAccCheckSlotTypeExistsWithVersion(rName, slotTypeVersion string, v *lex return fmt.Errorf("No Lex Slot Type ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn output, err := tflexmodelbuilding.FindSlotTypeVersionByName(conn, rs.Primary.ID, slotTypeVersion) @@ -401,7 +401,7 @@ func testAccCheckSlotTypeExists(rName string, output *lexmodelbuildingservice.Ge func testAccCheckSlotTypeNotExists(slotTypeName, slotTypeVersion string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn _, err := tflexmodelbuilding.FindSlotTypeVersionByName(conn, slotTypeName, slotTypeVersion) @@ -418,7 +418,7 @@ func testAccCheckSlotTypeNotExists(slotTypeName, slotTypeVersion string) resourc } func testAccCheckSlotTypeDestroy(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelBuildingConn + conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lex_slot_type" { diff --git a/internal/service/lexmodelbuilding/sweep.go b/internal/service/lexmodelbuilding/sweep.go index ac03eb0d2507..96f9222c0a56 100644 --- a/internal/service/lexmodelbuilding/sweep.go +++ b/internal/service/lexmodelbuilding/sweep.go @@ -47,7 +47,7 @@ func sweepBotAliases(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.(*conns.AWSClient).LexModelBuildingConn + conn := client.(*conns.AWSClient).LexModelsConn sweepResources := make([]*sweep.SweepResource, 0) var errs *multierror.Error @@ -120,7 +120,7 @@ func sweepBots(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.(*conns.AWSClient).LexModelBuildingConn + conn := client.(*conns.AWSClient).LexModelsConn sweepResources := make([]*sweep.SweepResource, 0) var errs *multierror.Error @@ -166,7 +166,7 @@ func sweepIntents(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.(*conns.AWSClient).LexModelBuildingConn + conn := client.(*conns.AWSClient).LexModelsConn sweepResources := make([]*sweep.SweepResource, 0) var errs *multierror.Error @@ -212,7 +212,7 @@ func sweepSlotTypes(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.(*conns.AWSClient).LexModelBuildingConn + conn := client.(*conns.AWSClient).LexModelsConn sweepResources := make([]*sweep.SweepResource, 0) var errs *multierror.Error From db4a8cb81a5f0f3b9d2e315ff29a088fc7378774 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Nov 2021 17:13:38 -0500 Subject: [PATCH 114/304] lexmodels: Update docs, generators --- internal/generate/listpages/main.go | 6 +++++- internal/generate/tags/main.go | 6 +++++- internal/service/lexmodelbuilding/README.md | 7 ++++--- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/internal/generate/listpages/main.go b/internal/generate/listpages/main.go index e6b25414b684..dc3b17f7ecff 100644 --- a/internal/generate/listpages/main.go +++ 
b/internal/generate/listpages/main.go @@ -298,6 +298,8 @@ func awsServiceName(s string) (string, error) { return "directoryservice", nil case "events": return "eventbridge", nil + case "lexmodels": + return "lexmodelbuildingservice", nil case "serverlessrepo": return "serverlessapplicationrepository", nil } @@ -331,6 +333,8 @@ func awsServiceNameUpper(s string) (string, error) { return awsServiceNames["directoryservice"], nil case "events": return awsServiceNames["eventbridge"], nil + case "lexmodels": + return awsServiceNames["lexmodelbuildingservice"], nil case "serverlessrepo": return awsServiceNames["serverlessapplicationrepository"], nil } @@ -500,7 +504,7 @@ func init() { awsServiceNames["kms"] = "KMS" awsServiceNames["lakeformation"] = "LakeFormation" awsServiceNames["lambda"] = "Lambda" - awsServiceNames["lexmodelbuilding"] = "LexModelBuilding" + awsServiceNames["lexmodelbuildingservice"] = "LexModelBuildingService" awsServiceNames["lexmodelsv2"] = "LexModelsV2" awsServiceNames["lexruntime"] = "LexRuntime" awsServiceNames["lexruntimev2"] = "LexRuntimeV2" diff --git a/internal/generate/tags/main.go b/internal/generate/tags/main.go index 256fd6673ff4..693aa8b548be 100644 --- a/internal/generate/tags/main.go +++ b/internal/generate/tags/main.go @@ -798,6 +798,8 @@ func awsServiceName(s string) (string, error) { return "directoryservice", nil case "events": return "eventbridge", nil + case "lexmodels": + return "lexmodelbuildingservice", nil case "serverlessrepo": return "serverlessapplicationrepository", nil } @@ -831,6 +833,8 @@ func awsServiceNameUpper(s string) (string, error) { return awsServiceNames["directoryservice"], nil case "events": return awsServiceNames["eventbridge"], nil + case "lexmodels": + return awsServiceNames["lexmodelbuildingservice"], nil case "serverlessrepo": return awsServiceNames["serverlessapplicationrepository"], nil } @@ -1007,7 +1011,7 @@ func init() { awsServiceNames["kms"] = "KMS" awsServiceNames["lakeformation"] = "LakeFormation" awsServiceNames["lambda"] = "Lambda" - awsServiceNames["lexmodelbuilding"] = "LexModelBuilding" + awsServiceNames["lexmodelbuildingservice"] = "LexModelBuildingService" awsServiceNames["lexmodelsv2"] = "LexModelsV2" awsServiceNames["lexruntime"] = "LexRuntime" awsServiceNames["lexruntimev2"] = "LexRuntimeV2" diff --git a/internal/service/lexmodelbuilding/README.md b/internal/service/lexmodelbuilding/README.md index 0d9f6d1d8807..89f3050858cd 100644 --- a/internal/service/lexmodelbuilding/README.md +++ b/internal/service/lexmodelbuilding/README.md @@ -1,10 +1,11 @@ -# Terraform AWS Provider LexModelBuilding Package +# Terraform AWS Provider Lex Model Building Service Package This area is primarily for AWS provider contributors and maintainers. For information on _using_ Terraform and the AWS provider, see the links below. +The AWS CLI refers to the Amazon Lex Model Building Service as _Lex Models_. ## Handy Links * [Find out about contributing](../../../docs/contributing) to the AWS provider! 
* AWS Provider Docs: [Home](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) -* AWS Provider Docs: [One of the LexModelBuilding resources](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lex_bot) -* AWS Docs: [AWS SDK for Go LexModelBuilding](https://docs.aws.amazon.com/sdk-for-go/api/service/lexmodelbuildingservice/) +* AWS Provider Docs: [One of the Lex Models resources](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lex_bot) +* AWS Docs: [AWS SDK for Go LexModelBuildingService](https://docs.aws.amazon.com/sdk-for-go/api/service/lexmodelbuildingservice/) From 9dbe1d92b91c01bc9b96b4f0755ebdf38e386bc8 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Nov 2021 17:22:13 -0500 Subject: [PATCH 115/304] lexmodels: Move to lexmodels --- internal/provider/provider.go | 18 ++++++++-------- .../{lexmodelbuilding => lexmodels}/README.md | 0 .../{lexmodelbuilding => lexmodels}/bot.go | 2 +- .../bot_alias.go | 2 +- .../bot_alias_data_source.go | 2 +- .../bot_alias_data_source_test.go | 2 +- .../bot_alias_test.go | 20 +++++++++--------- .../bot_data_source.go | 2 +- .../bot_data_source_test.go | 2 +- .../bot_test.go | 16 +++++++------- .../{lexmodelbuilding => lexmodels}/enum.go | 2 +- .../{lexmodelbuilding => lexmodels}/find.go | 2 +- .../{lexmodelbuilding => lexmodels}/intent.go | 2 +- .../intent_data_source.go | 2 +- .../intent_data_source_test.go | 2 +- .../intent_test.go | 12 +++++------ .../slot_type.go | 2 +- .../slot_type_data_source.go | 2 +- .../slot_type_data_source_test.go | 2 +- .../slot_type_test.go | 16 +++++++------- .../{lexmodelbuilding => lexmodels}/status.go | 2 +- .../{lexmodelbuilding => lexmodels}/sweep.go | 2 +- .../test-fixtures/lambdatest.zip | Bin .../{lexmodelbuilding => lexmodels}/wait.go | 2 +- internal/sweep/sweep_test.go | 2 +- 25 files changed, 59 insertions(+), 59 deletions(-) rename internal/service/{lexmodelbuilding => lexmodels}/README.md (100%) rename internal/service/{lexmodelbuilding => lexmodels}/bot.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_alias.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_alias_data_source.go (98%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_alias_data_source_test.go (98%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_alias_test.go (97%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_data_source.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_data_source_test.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/bot_test.go (98%) rename internal/service/{lexmodelbuilding => lexmodels}/enum.go (82%) rename internal/service/{lexmodelbuilding => lexmodels}/find.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/intent.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/intent_data_source.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/intent_data_source_test.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/intent_test.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/slot_type.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/slot_type_data_source.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/slot_type_data_source_test.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/slot_type_test.go (97%) rename internal/service/{lexmodelbuilding => lexmodels}/status.go (99%) rename internal/service/{lexmodelbuilding => 
lexmodels}/sweep.go (99%) rename internal/service/{lexmodelbuilding => lexmodels}/test-fixtures/lambdatest.zip (100%) rename internal/service/{lexmodelbuilding => lexmodels}/wait.go (99%) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 67e766c223e6..87db2d3e31bc 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -92,7 +92,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/kms" "github.com/hashicorp/terraform-provider-aws/internal/service/lakeformation" "github.com/hashicorp/terraform-provider-aws/internal/service/lambda" - "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodelbuilding" + "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" "github.com/hashicorp/terraform-provider-aws/internal/service/licensemanager" "github.com/hashicorp/terraform-provider-aws/internal/service/lightsail" "github.com/hashicorp/terraform-provider-aws/internal/service/macie" @@ -580,10 +580,10 @@ func Provider() *schema.Provider { "aws_lambda_invocation": lambda.DataSourceInvocation(), "aws_lambda_layer_version": lambda.DataSourceLayerVersion(), - "aws_lex_bot": lexmodelbuilding.DataSourceBot(), - "aws_lex_bot_alias": lexmodelbuilding.DataSourceBotAlias(), - "aws_lex_intent": lexmodelbuilding.DataSourceIntent(), - "aws_lex_slot_type": lexmodelbuilding.DataSourceSlotType(), + "aws_lex_bot": lexmodels.DataSourceBot(), + "aws_lex_bot_alias": lexmodels.DataSourceBotAlias(), + "aws_lex_intent": lexmodels.DataSourceIntent(), + "aws_lex_slot_type": lexmodels.DataSourceSlotType(), "aws_arn": meta.DataSourceARN(), "aws_billing_service_account": meta.DataSourceBillingServiceAccount(), @@ -1301,10 +1301,10 @@ func Provider() *schema.Provider { "aws_lambda_permission": lambda.ResourcePermission(), "aws_lambda_provisioned_concurrency_config": lambda.ResourceProvisionedConcurrencyConfig(), - "aws_lex_bot": lexmodelbuilding.ResourceBot(), - "aws_lex_bot_alias": lexmodelbuilding.ResourceBotAlias(), - "aws_lex_intent": lexmodelbuilding.ResourceIntent(), - "aws_lex_slot_type": lexmodelbuilding.ResourceSlotType(), + "aws_lex_bot": lexmodels.ResourceBot(), + "aws_lex_bot_alias": lexmodels.ResourceBotAlias(), + "aws_lex_intent": lexmodels.ResourceIntent(), + "aws_lex_slot_type": lexmodels.ResourceSlotType(), "aws_licensemanager_association": licensemanager.ResourceAssociation(), "aws_licensemanager_license_configuration": licensemanager.ResourceLicenseConfiguration(), diff --git a/internal/service/lexmodelbuilding/README.md b/internal/service/lexmodels/README.md similarity index 100% rename from internal/service/lexmodelbuilding/README.md rename to internal/service/lexmodels/README.md diff --git a/internal/service/lexmodelbuilding/bot.go b/internal/service/lexmodels/bot.go similarity index 99% rename from internal/service/lexmodelbuilding/bot.go rename to internal/service/lexmodels/bot.go index 086a07f4a89a..831c1d39ab32 100644 --- a/internal/service/lexmodelbuilding/bot.go +++ b/internal/service/lexmodels/bot.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "context" diff --git a/internal/service/lexmodelbuilding/bot_alias.go b/internal/service/lexmodels/bot_alias.go similarity index 99% rename from internal/service/lexmodelbuilding/bot_alias.go rename to internal/service/lexmodels/bot_alias.go index 6f9ea1d12245..017876ffc749 100644 --- a/internal/service/lexmodelbuilding/bot_alias.go +++ b/internal/service/lexmodels/bot_alias.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package 
lexmodels import ( "fmt" diff --git a/internal/service/lexmodelbuilding/bot_alias_data_source.go b/internal/service/lexmodels/bot_alias_data_source.go similarity index 98% rename from internal/service/lexmodelbuilding/bot_alias_data_source.go rename to internal/service/lexmodels/bot_alias_data_source.go index becfa8b4f6e1..eb2c67b1289a 100644 --- a/internal/service/lexmodelbuilding/bot_alias_data_source.go +++ b/internal/service/lexmodels/bot_alias_data_source.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "fmt" diff --git a/internal/service/lexmodelbuilding/bot_alias_data_source_test.go b/internal/service/lexmodels/bot_alias_data_source_test.go similarity index 98% rename from internal/service/lexmodelbuilding/bot_alias_data_source_test.go rename to internal/service/lexmodels/bot_alias_data_source_test.go index 684b2eea4093..07e35b23f796 100644 --- a/internal/service/lexmodelbuilding/bot_alias_data_source_test.go +++ b/internal/service/lexmodels/bot_alias_data_source_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "testing" diff --git a/internal/service/lexmodelbuilding/bot_alias_test.go b/internal/service/lexmodels/bot_alias_test.go similarity index 97% rename from internal/service/lexmodelbuilding/bot_alias_test.go rename to internal/service/lexmodels/bot_alias_test.go index a5922bbe25c6..7aaace5fe8bb 100644 --- a/internal/service/lexmodelbuilding/bot_alias_test.go +++ b/internal/service/lexmodels/bot_alias_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "fmt" @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - tflexmodelbuilding "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodelbuilding" + tflexmodels "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" ) func TestAccLexModelBuildingBotAlias_basic(t *testing.T) { @@ -45,7 +45,7 @@ func TestAccLexModelBuildingBotAlias_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "description", "Testing lex bot alias create."), acctest.CheckResourceAttrRFC3339(resourceName, "last_updated_date"), resource.TestCheckResourceAttr(resourceName, "bot_name", testBotAliasID), - resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodels.BotVersionLatest), resource.TestCheckResourceAttr(resourceName, "name", testBotAliasID), resource.TestCheckResourceAttr(resourceName, "conversation_logs.#", "0"), ), @@ -82,7 +82,7 @@ func testAccBotAlias_botVersion(t *testing.T) { ), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBotAliasExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodels.BotVersionLatest), ), }, { @@ -138,7 +138,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsText(t *testing.T) { ), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBotAliasExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodels.BotVersionLatest), resource.TestCheckResourceAttrPair(resourceName, "conversation_logs.0.iam_role_arn", 
iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "conversation_logs.0.log_settings.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "conversation_logs.0.log_settings.*", map[string]string{ @@ -148,7 +148,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsText(t *testing.T) { }), resource.TestCheckTypeSetElemAttrPair(resourceName, "conversation_logs.0.log_settings.*.resource_arn", cloudwatchLogGroupResourceName, "arn"), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "conversation_logs.0.log_settings.*", map[string]*regexp.Regexp{ - "resource_prefix": regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf(`aws/lex/%s/%s/%s/`, testBotID, testBotAliasID, tflexmodelbuilding.BotVersionLatest))), + "resource_prefix": regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf(`aws/lex/%s/%s/%s/`, testBotID, testBotAliasID, tflexmodels.BotVersionLatest))), }), ), }, @@ -188,7 +188,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsAudio(t *testing.T) { ), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBotAliasExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodels.BotVersionLatest), resource.TestCheckResourceAttrPair(resourceName, "conversation_logs.0.iam_role_arn", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "conversation_logs.0.log_settings.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "conversation_logs.0.log_settings.*", map[string]string{ @@ -198,7 +198,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsAudio(t *testing.T) { resource.TestCheckTypeSetElemAttrPair(resourceName, "conversation_logs.0.log_settings.*.resource_arn", s3BucketResourceName, "arn"), resource.TestCheckTypeSetElemAttrPair(resourceName, "conversation_logs.0.log_settings.*.kms_key_arn", kmsKeyResourceName, "arn"), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "conversation_logs.0.log_settings.*", map[string]*regexp.Regexp{ - "resource_prefix": regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf(`aws/lex/%s/%s/%s/`, testBotID, testBotAliasID, tflexmodelbuilding.BotVersionLatest))), + "resource_prefix": regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf(`aws/lex/%s/%s/%s/`, testBotID, testBotAliasID, tflexmodels.BotVersionLatest))), }), ), }, @@ -239,7 +239,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsBoth(t *testing.T) { ), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBotAliasExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(resourceName, "bot_version", tflexmodels.BotVersionLatest), resource.TestCheckResourceAttrPair(resourceName, "conversation_logs.0.iam_role_arn", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "conversation_logs.0.log_settings.#", "2"), @@ -338,7 +338,7 @@ func TestAccLexModelBuildingBotAlias_disappears(t *testing.T) { ), Check: resource.ComposeTestCheckFunc( testAccCheckBotAliasExists(resourceName, &v), - acctest.CheckResourceDisappears(acctest.Provider, tflexmodelbuilding.ResourceBotAlias(), resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tflexmodels.ResourceBotAlias(), resourceName), ), ExpectNonEmptyPlan: true, }, diff --git a/internal/service/lexmodelbuilding/bot_data_source.go b/internal/service/lexmodels/bot_data_source.go similarity index 99% rename from 
internal/service/lexmodelbuilding/bot_data_source.go rename to internal/service/lexmodels/bot_data_source.go index 4977ac3ac9aa..2f9839086f67 100644 --- a/internal/service/lexmodelbuilding/bot_data_source.go +++ b/internal/service/lexmodels/bot_data_source.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "fmt" diff --git a/internal/service/lexmodelbuilding/bot_data_source_test.go b/internal/service/lexmodels/bot_data_source_test.go similarity index 99% rename from internal/service/lexmodelbuilding/bot_data_source_test.go rename to internal/service/lexmodels/bot_data_source_test.go index 98bbee7c9212..d76b62054ad5 100644 --- a/internal/service/lexmodelbuilding/bot_data_source_test.go +++ b/internal/service/lexmodels/bot_data_source_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "testing" diff --git a/internal/service/lexmodelbuilding/bot_test.go b/internal/service/lexmodels/bot_test.go similarity index 98% rename from internal/service/lexmodelbuilding/bot_test.go rename to internal/service/lexmodels/bot_test.go index ca2832a5bb3a..11422c3fb3a7 100644 --- a/internal/service/lexmodelbuilding/bot_test.go +++ b/internal/service/lexmodels/bot_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "fmt" @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - tflexmodelbuilding "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodelbuilding" + tflexmodels "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -68,7 +68,7 @@ func TestAccLexModelBuildingBot_basic(t *testing.T) { resource.TestCheckResourceAttr(rName, "nlu_intent_confidence_threshold", "0"), resource.TestCheckResourceAttr(rName, "process_behavior", "SAVE"), resource.TestCheckResourceAttr(rName, "status", "NOT_BUILT"), - resource.TestCheckResourceAttr(rName, "version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(rName, "version", tflexmodels.BotVersionLatest), resource.TestCheckNoResourceAttr(rName, "voice_id"), ), }, @@ -120,7 +120,7 @@ func testAccBot_createVersion(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBotExists(rName, &v1), testAccCheckBotNotExists(testBotID, "1"), - resource.TestCheckResourceAttr(rName, "version", tflexmodelbuilding.BotVersionLatest), + resource.TestCheckResourceAttr(rName, "version", tflexmodels.BotVersionLatest), resource.TestCheckResourceAttr(rName, "description", "Bot to order flowers on the behalf of a user"), ), }, @@ -718,7 +718,7 @@ func TestAccLexModelBuildingBot_disappears(t *testing.T) { ), Check: resource.ComposeTestCheckFunc( testAccCheckBotExists(rName, &v), - acctest.CheckResourceDisappears(acctest.Provider, tflexmodelbuilding.ResourceBot(), rName), + acctest.CheckResourceDisappears(acctest.Provider, tflexmodels.ResourceBot(), rName), ), ExpectNonEmptyPlan: true, }, @@ -739,7 +739,7 @@ func testAccCheckBotExistsWithVersion(rName, botVersion string, v *lexmodelbuild conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn - output, err := tflexmodelbuilding.FindBotVersionByName(conn, rs.Primary.ID, botVersion) + output, err := tflexmodels.FindBotVersionByName(conn, rs.Primary.ID, botVersion) if err != nil { return err @@ -752,14 +752,14 @@ func 
testAccCheckBotExistsWithVersion(rName, botVersion string, v *lexmodelbuild } func testAccCheckBotExists(rName string, output *lexmodelbuildingservice.GetBotOutput) resource.TestCheckFunc { - return testAccCheckBotExistsWithVersion(rName, tflexmodelbuilding.BotVersionLatest, output) + return testAccCheckBotExistsWithVersion(rName, tflexmodels.BotVersionLatest, output) } func testAccCheckBotNotExists(botName, botVersion string) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn - _, err := tflexmodelbuilding.FindBotVersionByName(conn, botName, botVersion) + _, err := tflexmodels.FindBotVersionByName(conn, botName, botVersion) if tfresource.NotFound(err) { return nil diff --git a/internal/service/lexmodelbuilding/enum.go b/internal/service/lexmodels/enum.go similarity index 82% rename from internal/service/lexmodelbuilding/enum.go rename to internal/service/lexmodels/enum.go index b20aac298cd8..f038374c2efc 100644 --- a/internal/service/lexmodelbuilding/enum.go +++ b/internal/service/lexmodels/enum.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels const ( BotVersionLatest = "$LATEST" diff --git a/internal/service/lexmodelbuilding/find.go b/internal/service/lexmodels/find.go similarity index 99% rename from internal/service/lexmodelbuilding/find.go rename to internal/service/lexmodels/find.go index c24d28ab367e..c4cf4f2b81b0 100644 --- a/internal/service/lexmodelbuilding/find.go +++ b/internal/service/lexmodels/find.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "strconv" diff --git a/internal/service/lexmodelbuilding/intent.go b/internal/service/lexmodels/intent.go similarity index 99% rename from internal/service/lexmodelbuilding/intent.go rename to internal/service/lexmodels/intent.go index bfeb26f88b56..9ba126451f49 100644 --- a/internal/service/lexmodelbuilding/intent.go +++ b/internal/service/lexmodels/intent.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "context" diff --git a/internal/service/lexmodelbuilding/intent_data_source.go b/internal/service/lexmodels/intent_data_source.go similarity index 99% rename from internal/service/lexmodelbuilding/intent_data_source.go rename to internal/service/lexmodels/intent_data_source.go index ad79f2afdc21..cc65e1848eb7 100644 --- a/internal/service/lexmodelbuilding/intent_data_source.go +++ b/internal/service/lexmodels/intent_data_source.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "fmt" diff --git a/internal/service/lexmodelbuilding/intent_data_source_test.go b/internal/service/lexmodels/intent_data_source_test.go similarity index 99% rename from internal/service/lexmodelbuilding/intent_data_source_test.go rename to internal/service/lexmodels/intent_data_source_test.go index 3e0fe2895ebb..028903e547d4 100644 --- a/internal/service/lexmodelbuilding/intent_data_source_test.go +++ b/internal/service/lexmodels/intent_data_source_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "testing" diff --git a/internal/service/lexmodelbuilding/intent_test.go b/internal/service/lexmodels/intent_test.go similarity index 99% rename from internal/service/lexmodelbuilding/intent_test.go rename to internal/service/lexmodels/intent_test.go index fb299e8ac77c..6d31fb44bdde 100644 --- a/internal/service/lexmodelbuilding/intent_test.go +++ b/internal/service/lexmodels/intent_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( 
"fmt" @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - tflexmodelbuilding "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodelbuilding" + tflexmodels "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" ) func TestAccLexModelBuildingIntent_basic(t *testing.T) { @@ -53,7 +53,7 @@ func TestAccLexModelBuildingIntent_basic(t *testing.T) { resource.TestCheckNoResourceAttr(rName, "rejection_statement"), resource.TestCheckNoResourceAttr(rName, "sample_utterances"), resource.TestCheckNoResourceAttr(rName, "slot"), - resource.TestCheckResourceAttr(rName, "version", tflexmodelbuilding.IntentVersionLatest), + resource.TestCheckResourceAttr(rName, "version", tflexmodels.IntentVersionLatest), ), }, { @@ -545,7 +545,7 @@ func TestAccLexModelBuildingIntent_disappears(t *testing.T) { Config: testAccIntentConfig_basic(testIntentID), Check: resource.ComposeTestCheckFunc( testAccCheckIntentExists(rName, &v), - acctest.CheckResourceDisappears(acctest.Provider, tflexmodelbuilding.ResourceIntent(), rName), + acctest.CheckResourceDisappears(acctest.Provider, tflexmodels.ResourceIntent(), rName), ), ExpectNonEmptyPlan: true, }, @@ -608,7 +608,7 @@ func TestAccLexModelBuildingIntent_updateWithExternalChange(t *testing.T) { Config: testAccIntentConfig_basic(testIntentID), Check: resource.ComposeTestCheckFunc( testAccCheckIntentExists(rName, &v), - testAccCheckAWSLexIntentUpdateDescription(acctest.Provider, tflexmodelbuilding.ResourceIntent(), rName), + testAccCheckAWSLexIntentUpdateDescription(acctest.Provider, tflexmodels.ResourceIntent(), rName), ), ExpectNonEmptyPlan: true, }, @@ -706,7 +706,7 @@ func testAccCheckIntentExistsWithVersion(rName, intentVersion string, output *le } func testAccCheckIntentExists(rName string, output *lexmodelbuildingservice.GetIntentOutput) resource.TestCheckFunc { - return testAccCheckIntentExistsWithVersion(rName, tflexmodelbuilding.IntentVersionLatest, output) + return testAccCheckIntentExistsWithVersion(rName, tflexmodels.IntentVersionLatest, output) } func testAccCheckIntentNotExists(intentName, intentVersion string) resource.TestCheckFunc { diff --git a/internal/service/lexmodelbuilding/slot_type.go b/internal/service/lexmodels/slot_type.go similarity index 99% rename from internal/service/lexmodelbuilding/slot_type.go rename to internal/service/lexmodels/slot_type.go index cfb575e0650f..eb6d96030a9b 100644 --- a/internal/service/lexmodelbuilding/slot_type.go +++ b/internal/service/lexmodels/slot_type.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "context" diff --git a/internal/service/lexmodelbuilding/slot_type_data_source.go b/internal/service/lexmodels/slot_type_data_source.go similarity index 99% rename from internal/service/lexmodelbuilding/slot_type_data_source.go rename to internal/service/lexmodels/slot_type_data_source.go index e7f69e3a2655..8a4bff177a65 100644 --- a/internal/service/lexmodelbuilding/slot_type_data_source.go +++ b/internal/service/lexmodels/slot_type_data_source.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "fmt" diff --git a/internal/service/lexmodelbuilding/slot_type_data_source_test.go b/internal/service/lexmodels/slot_type_data_source_test.go similarity index 99% rename from internal/service/lexmodelbuilding/slot_type_data_source_test.go rename to 
internal/service/lexmodels/slot_type_data_source_test.go index 7f813790f334..704a5ebe4f6f 100644 --- a/internal/service/lexmodelbuilding/slot_type_data_source_test.go +++ b/internal/service/lexmodels/slot_type_data_source_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "testing" diff --git a/internal/service/lexmodelbuilding/slot_type_test.go b/internal/service/lexmodels/slot_type_test.go similarity index 97% rename from internal/service/lexmodelbuilding/slot_type_test.go rename to internal/service/lexmodels/slot_type_test.go index 983cd2eda577..8f5d57041e63 100644 --- a/internal/service/lexmodelbuilding/slot_type_test.go +++ b/internal/service/lexmodels/slot_type_test.go @@ -1,4 +1,4 @@ -package lexmodelbuilding_test +package lexmodels_test import ( "fmt" @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - tflexmodelbuilding "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodelbuilding" + tflexmodels "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -45,7 +45,7 @@ func TestAccLexModelBuildingSlotType_basic(t *testing.T) { resource.TestCheckResourceAttr(rName, "name", testSlotTypeID), resource.TestCheckResourceAttr(rName, "value_selection_strategy", lexmodelbuildingservice.SlotValueSelectionStrategyOriginalValue), resource.TestCheckResourceAttrSet(rName, "checksum"), - resource.TestCheckResourceAttr(rName, "version", tflexmodelbuilding.SlotTypeVersionLatest), + resource.TestCheckResourceAttr(rName, "version", tflexmodels.SlotTypeVersionLatest), acctest.CheckResourceAttrRFC3339(rName, "created_date"), acctest.CheckResourceAttrRFC3339(rName, "last_updated_date"), ), @@ -79,7 +79,7 @@ func TestAccLexModelBuildingSlotType_createVersion(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckSlotTypeExists(rName, &v), testAccCheckSlotTypeNotExists(testSlotTypeID, "1"), - resource.TestCheckResourceAttr(rName, "version", tflexmodelbuilding.SlotTypeVersionLatest), + resource.TestCheckResourceAttr(rName, "version", tflexmodels.SlotTypeVersionLatest), ), }, { @@ -306,7 +306,7 @@ func TestAccLexModelBuildingSlotType_disappears(t *testing.T) { Config: testAccSlotTypeConfig_basic(testSlotTypeID), Check: resource.ComposeTestCheckFunc( testAccCheckSlotTypeExists(rName, &v), - acctest.CheckResourceDisappears(acctest.Provider, tflexmodelbuilding.ResourceSlotType(), rName), + acctest.CheckResourceDisappears(acctest.Provider, tflexmodels.ResourceSlotType(), rName), ), ExpectNonEmptyPlan: true, }, @@ -383,7 +383,7 @@ func testAccCheckSlotTypeExistsWithVersion(rName, slotTypeVersion string, v *lex conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn - output, err := tflexmodelbuilding.FindSlotTypeVersionByName(conn, rs.Primary.ID, slotTypeVersion) + output, err := tflexmodels.FindSlotTypeVersionByName(conn, rs.Primary.ID, slotTypeVersion) if err != nil { return err @@ -396,14 +396,14 @@ func testAccCheckSlotTypeExistsWithVersion(rName, slotTypeVersion string, v *lex } func testAccCheckSlotTypeExists(rName string, output *lexmodelbuildingservice.GetSlotTypeOutput) resource.TestCheckFunc { - return testAccCheckSlotTypeExistsWithVersion(rName, tflexmodelbuilding.SlotTypeVersionLatest, output) + return testAccCheckSlotTypeExistsWithVersion(rName, tflexmodels.SlotTypeVersionLatest, 
output) } func testAccCheckSlotTypeNotExists(slotTypeName, slotTypeVersion string) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).LexModelsConn - _, err := tflexmodelbuilding.FindSlotTypeVersionByName(conn, slotTypeName, slotTypeVersion) + _, err := tflexmodels.FindSlotTypeVersionByName(conn, slotTypeName, slotTypeVersion) if tfresource.NotFound(err) { return nil diff --git a/internal/service/lexmodelbuilding/status.go b/internal/service/lexmodels/status.go similarity index 99% rename from internal/service/lexmodelbuilding/status.go rename to internal/service/lexmodels/status.go index a50d20d62abc..4361335a6a58 100644 --- a/internal/service/lexmodelbuilding/status.go +++ b/internal/service/lexmodels/status.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "github.com/aws/aws-sdk-go/aws" diff --git a/internal/service/lexmodelbuilding/sweep.go b/internal/service/lexmodels/sweep.go similarity index 99% rename from internal/service/lexmodelbuilding/sweep.go rename to internal/service/lexmodels/sweep.go index 96f9222c0a56..30a3b8b65bb5 100644 --- a/internal/service/lexmodelbuilding/sweep.go +++ b/internal/service/lexmodels/sweep.go @@ -1,7 +1,7 @@ //go:build sweep // +build sweep -package lexmodelbuilding +package lexmodels import ( "fmt" diff --git a/internal/service/lexmodelbuilding/test-fixtures/lambdatest.zip b/internal/service/lexmodels/test-fixtures/lambdatest.zip similarity index 100% rename from internal/service/lexmodelbuilding/test-fixtures/lambdatest.zip rename to internal/service/lexmodels/test-fixtures/lambdatest.zip diff --git a/internal/service/lexmodelbuilding/wait.go b/internal/service/lexmodels/wait.go similarity index 99% rename from internal/service/lexmodelbuilding/wait.go rename to internal/service/lexmodels/wait.go index d98ae083e971..d9faa1ea0a59 100644 --- a/internal/service/lexmodelbuilding/wait.go +++ b/internal/service/lexmodels/wait.go @@ -1,4 +1,4 @@ -package lexmodelbuilding +package lexmodels import ( "errors" diff --git a/internal/sweep/sweep_test.go b/internal/sweep/sweep_test.go index 2879e52519c5..77aedce7fbcb 100644 --- a/internal/sweep/sweep_test.go +++ b/internal/sweep/sweep_test.go @@ -71,7 +71,7 @@ import ( _ "github.com/hashicorp/terraform-provider-aws/internal/service/kinesisanalyticsv2" _ "github.com/hashicorp/terraform-provider-aws/internal/service/kms" _ "github.com/hashicorp/terraform-provider-aws/internal/service/lambda" - _ "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodelbuilding" + _ "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" _ "github.com/hashicorp/terraform-provider-aws/internal/service/licensemanager" _ "github.com/hashicorp/terraform-provider-aws/internal/service/lightsail" _ "github.com/hashicorp/terraform-provider-aws/internal/service/mq" From 591abb1d87a5a4619ae877d34c5c2a60ea8860bc Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Nov 2021 18:23:53 -0500 Subject: [PATCH 116/304] lexmodels: Fix test names --- internal/acctest/acctest_test.go | 2 +- internal/service/lexmodels/bot_alias_test.go | 12 ++++---- .../service/lexmodels/bot_data_source_test.go | 2 +- internal/service/lexmodels/bot_test.go | 28 +++++++++---------- .../lexmodels/intent_data_source_test.go | 4 +-- internal/service/lexmodels/intent_test.go | 26 ++++++++--------- .../lexmodels/slot_type_data_source_test.go | 4 +-- internal/service/lexmodels/slot_type_test.go | 16 +++++------ 8 files changed, 47 insertions(+), 47 
deletions(-) diff --git a/internal/acctest/acctest_test.go b/internal/acctest/acctest_test.go index b96f005e0e12..8d51e80bf829 100644 --- a/internal/acctest/acctest_test.go +++ b/internal/acctest/acctest_test.go @@ -218,7 +218,7 @@ func TestAccAcctestProvider_unusualEndpoints(t *testing.T) { unusual1 := []string{"es", "elasticsearch", "http://notarealendpoint"} unusual2 := []string{"databasemigration", "dms", "http://alsonotarealendpoint"} - unusual3 := []string{"lexmodels", "lexmodelbuilding", "http://kingofspain"} + unusual3 := []string{"lexmodelbuildingservice", "lexmodels", "http://kingofspain"} resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { PreCheck(t) }, diff --git a/internal/service/lexmodels/bot_alias_test.go b/internal/service/lexmodels/bot_alias_test.go index 7aaace5fe8bb..f159df7a5d5f 100644 --- a/internal/service/lexmodels/bot_alias_test.go +++ b/internal/service/lexmodels/bot_alias_test.go @@ -16,7 +16,7 @@ import ( tflexmodels "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" ) -func TestAccLexModelBuildingBotAlias_basic(t *testing.T) { +func TestAccLexModelsBotAlias_basic(t *testing.T) { var v lexmodelbuildingservice.GetBotAliasOutput resourceName := "aws_lex_bot_alias.test" testBotAliasID := "test_bot_alias" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -112,7 +112,7 @@ func testAccBotAlias_botVersion(t *testing.T) { }) } -func TestAccLexModelBuildingBotAlias_conversationLogsText(t *testing.T) { +func TestAccLexModelsBotAlias_conversationLogsText(t *testing.T) { var v lexmodelbuildingservice.GetBotAliasOutput testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) testBotAliasID := "test_bot_alias" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -161,7 +161,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsText(t *testing.T) { }) } -func TestAccLexModelBuildingBotAlias_conversationLogsAudio(t *testing.T) { +func TestAccLexModelsBotAlias_conversationLogsAudio(t *testing.T) { var v lexmodelbuildingservice.GetBotAliasOutput testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) testBotAliasID := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -211,7 +211,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsAudio(t *testing.T) { }) } -func TestAccLexModelBuildingBotAlias_conversationLogsBoth(t *testing.T) { +func TestAccLexModelsBotAlias_conversationLogsBoth(t *testing.T) { var v lexmodelbuildingservice.GetBotAliasOutput testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) testBotAliasID := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -267,7 +267,7 @@ func TestAccLexModelBuildingBotAlias_conversationLogsBoth(t *testing.T) { }) } -func TestAccLexModelBuildingBotAlias_description(t *testing.T) { +func TestAccLexModelsBotAlias_description(t *testing.T) { var v lexmodelbuildingservice.GetBotAliasOutput resourceName := "aws_lex_bot_alias.test" testBotAliasID := "test_bot_alias" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -316,7 +316,7 @@ func TestAccLexModelBuildingBotAlias_description(t *testing.T) { }) } -func TestAccLexModelBuildingBotAlias_disappears(t *testing.T) { +func TestAccLexModelsBotAlias_disappears(t *testing.T) { var v lexmodelbuildingservice.GetBotAliasOutput resourceName := "aws_lex_bot_alias.test" testBotAliasID := "test_bot_alias" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) diff --git 
a/internal/service/lexmodels/bot_data_source_test.go b/internal/service/lexmodels/bot_data_source_test.go index d76b62054ad5..752d56da2944 100644 --- a/internal/service/lexmodels/bot_data_source_test.go +++ b/internal/service/lexmodels/bot_data_source_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" ) -func TestAccLexModelBuildingBotDataSource_basic(t *testing.T) { +func TestAccLexModelsBotDataSource_basic(t *testing.T) { rName := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) dataSourceName := "data.aws_lex_bot.test" resourceName := "aws_lex_bot.test" diff --git a/internal/service/lexmodels/bot_test.go b/internal/service/lexmodels/bot_test.go index 11422c3fb3a7..935dc0fa2339 100644 --- a/internal/service/lexmodels/bot_test.go +++ b/internal/service/lexmodels/bot_test.go @@ -26,7 +26,7 @@ func testAccErrorCheckSkipLex(t *testing.T) resource.ErrorCheckFunc { ) } -func TestAccLexModelBuildingBot_basic(t *testing.T) { +func TestAccLexModelsBot_basic(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -81,7 +81,7 @@ func TestAccLexModelBuildingBot_basic(t *testing.T) { }) } -func TestAccLexModelBuildingBot_Version_serial(t *testing.T) { +func TestAccLexModelsBot_Version_serial(t *testing.T) { testCases := map[string]func(t *testing.T){ "LexBot_createVersion": testAccBot_createVersion, "LexBotAlias_botVersion": testAccBotAlias_botVersion, @@ -144,7 +144,7 @@ func testAccBot_createVersion(t *testing.T) { }) } -func TestAccLexModelBuildingBot_abortStatement(t *testing.T) { +func TestAccLexModelsBot_abortStatement(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -204,7 +204,7 @@ func TestAccLexModelBuildingBot_abortStatement(t *testing.T) { }) } -func TestAccLexModelBuildingBot_clarificationPrompt(t *testing.T) { +func TestAccLexModelsBot_clarificationPrompt(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -260,7 +260,7 @@ func TestAccLexModelBuildingBot_clarificationPrompt(t *testing.T) { }) } -func TestAccLexModelBuildingBot_childDirected(t *testing.T) { +func TestAccLexModelsBot_childDirected(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -307,7 +307,7 @@ func TestAccLexModelBuildingBot_childDirected(t *testing.T) { }) } -func TestAccLexModelBuildingBot_description(t *testing.T) { +func TestAccLexModelsBot_description(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -354,7 +354,7 @@ func TestAccLexModelBuildingBot_description(t *testing.T) { }) } -func TestAccLexModelBuildingBot_detectSentiment(t *testing.T) { +func TestAccLexModelsBot_detectSentiment(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -401,7 +401,7 @@ func TestAccLexModelBuildingBot_detectSentiment(t *testing.T) { }) } -func TestAccLexModelBuildingBot_enableModelImprovements(t *testing.T) { +func 
TestAccLexModelsBot_enableModelImprovements(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -449,7 +449,7 @@ func TestAccLexModelBuildingBot_enableModelImprovements(t *testing.T) { }) } -func TestAccLexModelBuildingBot_idleSessionTTLInSeconds(t *testing.T) { +func TestAccLexModelsBot_idleSessionTTLInSeconds(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -496,7 +496,7 @@ func TestAccLexModelBuildingBot_idleSessionTTLInSeconds(t *testing.T) { }) } -func TestAccLexModelBuildingBot_intents(t *testing.T) { +func TestAccLexModelsBot_intents(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -543,7 +543,7 @@ func TestAccLexModelBuildingBot_intents(t *testing.T) { }) } -func TestAccLexModelBuildingBot_computeVersion(t *testing.T) { +func TestAccLexModelsBot_computeVersion(t *testing.T) { var v1 lexmodelbuildingservice.GetBotOutput var v2 lexmodelbuildingservice.GetBotAliasOutput @@ -603,7 +603,7 @@ func TestAccLexModelBuildingBot_computeVersion(t *testing.T) { }) } -func TestAccLexModelBuildingBot_locale(t *testing.T) { +func TestAccLexModelsBot_locale(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -650,7 +650,7 @@ func TestAccLexModelBuildingBot_locale(t *testing.T) { }) } -func TestAccLexModelBuildingBot_voiceID(t *testing.T) { +func TestAccLexModelsBot_voiceID(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -697,7 +697,7 @@ func TestAccLexModelBuildingBot_voiceID(t *testing.T) { }) } -func TestAccLexModelBuildingBot_disappears(t *testing.T) { +func TestAccLexModelsBot_disappears(t *testing.T) { var v lexmodelbuildingservice.GetBotOutput rName := "aws_lex_bot.test" testBotID := "test_bot_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) diff --git a/internal/service/lexmodels/intent_data_source_test.go b/internal/service/lexmodels/intent_data_source_test.go index 028903e547d4..b98a6b3cdfc8 100644 --- a/internal/service/lexmodels/intent_data_source_test.go +++ b/internal/service/lexmodels/intent_data_source_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" ) -func TestAccLexModelBuildingIntentDataSource_basic(t *testing.T) { +func TestAccLexModelsIntentDataSource_basic(t *testing.T) { rName := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) dataSourceName := "data.aws_lex_intent.test" resourceName := "aws_lex_intent.test" @@ -41,7 +41,7 @@ func TestAccLexModelBuildingIntentDataSource_basic(t *testing.T) { }) } -func TestAccLexModelBuildingIntentDataSource_withVersion(t *testing.T) { +func TestAccLexModelsIntentDataSource_withVersion(t *testing.T) { rName := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) dataSourceName := "data.aws_lex_intent.test" resourceName := "aws_lex_intent.test" diff --git a/internal/service/lexmodels/intent_test.go b/internal/service/lexmodels/intent_test.go index 6d31fb44bdde..37bcfb6503ec 100644 --- a/internal/service/lexmodels/intent_test.go +++ 
b/internal/service/lexmodels/intent_test.go @@ -17,7 +17,7 @@ import ( tflexmodels "github.com/hashicorp/terraform-provider-aws/internal/service/lexmodels" ) -func TestAccLexModelBuildingIntent_basic(t *testing.T) { +func TestAccLexModelsIntent_basic(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -66,7 +66,7 @@ func TestAccLexModelBuildingIntent_basic(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_createVersion(t *testing.T) { +func TestAccLexModelsIntent_createVersion(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -111,7 +111,7 @@ func TestAccLexModelBuildingIntent_createVersion(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_conclusionStatement(t *testing.T) { +func TestAccLexModelsIntent_conclusionStatement(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -167,7 +167,7 @@ func TestAccLexModelBuildingIntent_conclusionStatement(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_confirmationPromptAndRejectionStatement(t *testing.T) { +func TestAccLexModelsIntent_confirmationPromptAndRejectionStatement(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -233,7 +233,7 @@ func TestAccLexModelBuildingIntent_confirmationPromptAndRejectionStatement(t *te }) } -func TestAccLexModelBuildingIntent_dialogCodeHook(t *testing.T) { +func TestAccLexModelsIntent_dialogCodeHook(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -269,7 +269,7 @@ func TestAccLexModelBuildingIntent_dialogCodeHook(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_followUpPrompt(t *testing.T) { +func TestAccLexModelsIntent_followUpPrompt(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -341,7 +341,7 @@ func TestAccLexModelBuildingIntent_followUpPrompt(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_fulfillmentActivity(t *testing.T) { +func TestAccLexModelsIntent_fulfillmentActivity(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -379,7 +379,7 @@ func TestAccLexModelBuildingIntent_fulfillmentActivity(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_sampleUtterances(t *testing.T) { +func TestAccLexModelsIntent_sampleUtterances(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -424,7 +424,7 @@ func TestAccLexModelBuildingIntent_sampleUtterances(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_slots(t *testing.T) { +func TestAccLexModelsIntent_slots(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := 
"test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -480,7 +480,7 @@ func TestAccLexModelBuildingIntent_slots(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_slotsCustom(t *testing.T) { +func TestAccLexModelsIntent_slotsCustom(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -527,7 +527,7 @@ func TestAccLexModelBuildingIntent_slotsCustom(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_disappears(t *testing.T) { +func TestAccLexModelsIntent_disappears(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -553,7 +553,7 @@ func TestAccLexModelBuildingIntent_disappears(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_updateWithExternalChange(t *testing.T) { +func TestAccLexModelsIntent_updateWithExternalChange(t *testing.T) { var v lexmodelbuildingservice.GetIntentOutput rName := "aws_lex_intent.test" testIntentID := "test_intent_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -622,7 +622,7 @@ func TestAccLexModelBuildingIntent_updateWithExternalChange(t *testing.T) { }) } -func TestAccLexModelBuildingIntent_computeVersion(t *testing.T) { +func TestAccLexModelsIntent_computeVersion(t *testing.T) { var v1 lexmodelbuildingservice.GetIntentOutput var v2 lexmodelbuildingservice.GetBotOutput diff --git a/internal/service/lexmodels/slot_type_data_source_test.go b/internal/service/lexmodels/slot_type_data_source_test.go index 704a5ebe4f6f..99181df7170b 100644 --- a/internal/service/lexmodels/slot_type_data_source_test.go +++ b/internal/service/lexmodels/slot_type_data_source_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" ) -func TestAccLexModelBuildingSlotTypeDataSource_basic(t *testing.T) { +func TestAccLexModelsSlotTypeDataSource_basic(t *testing.T) { rName := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) dataSourceName := "data.aws_lex_slot_type.test" resourceName := "aws_lex_slot_type.test" @@ -42,7 +42,7 @@ func TestAccLexModelBuildingSlotTypeDataSource_basic(t *testing.T) { }) } -func TestAccLexModelBuildingSlotTypeDataSource_withVersion(t *testing.T) { +func TestAccLexModelsSlotTypeDataSource_withVersion(t *testing.T) { rName := sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) dataSourceName := "data.aws_lex_slot_type.test" resourceName := "aws_lex_slot_type.test" diff --git a/internal/service/lexmodels/slot_type_test.go b/internal/service/lexmodels/slot_type_test.go index 8f5d57041e63..ac12665d0c48 100644 --- a/internal/service/lexmodels/slot_type_test.go +++ b/internal/service/lexmodels/slot_type_test.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestAccLexModelBuildingSlotType_basic(t *testing.T) { +func TestAccLexModelsSlotType_basic(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := "aws_lex_slot_type.test" testSlotTypeID := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -60,7 +60,7 @@ func TestAccLexModelBuildingSlotType_basic(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_createVersion(t *testing.T) { +func TestAccLexModelsSlotType_createVersion(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := 
"aws_lex_slot_type.test" testSlotTypeID := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -106,7 +106,7 @@ func TestAccLexModelBuildingSlotType_createVersion(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_description(t *testing.T) { +func TestAccLexModelsSlotType_description(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := "aws_lex_slot_type.test" testSlotTypeID := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -150,7 +150,7 @@ func TestAccLexModelBuildingSlotType_description(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_enumerationValues(t *testing.T) { +func TestAccLexModelsSlotType_enumerationValues(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := "aws_lex_slot_type.test" testSlotTypeID := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -199,7 +199,7 @@ func TestAccLexModelBuildingSlotType_enumerationValues(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_name(t *testing.T) { +func TestAccLexModelsSlotType_name(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := "aws_lex_slot_type.test" testSlotTypeID1 := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -244,7 +244,7 @@ func TestAccLexModelBuildingSlotType_name(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_valueSelectionStrategy(t *testing.T) { +func TestAccLexModelsSlotType_valueSelectionStrategy(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := "aws_lex_slot_type.test" testSlotTypeID := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -288,7 +288,7 @@ func TestAccLexModelBuildingSlotType_valueSelectionStrategy(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_disappears(t *testing.T) { +func TestAccLexModelsSlotType_disappears(t *testing.T) { var v lexmodelbuildingservice.GetSlotTypeOutput rName := "aws_lex_slot_type.test" testSlotTypeID := "test_slot_type_" + sdkacctest.RandStringFromCharSet(8, sdkacctest.CharSetAlpha) @@ -314,7 +314,7 @@ func TestAccLexModelBuildingSlotType_disappears(t *testing.T) { }) } -func TestAccLexModelBuildingSlotType_computeVersion(t *testing.T) { +func TestAccLexModelsSlotType_computeVersion(t *testing.T) { var v1 lexmodelbuildingservice.GetSlotTypeOutput var v2 lexmodelbuildingservice.GetIntentOutput From ccccf47bbcec39a40c5f0d5a432c3d43eeee7459 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 11 Nov 2021 18:42:00 -0500 Subject: [PATCH 117/304] Update CHANGELOG for #21743 --- .changelog/21743.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21743.txt diff --git a/.changelog/21743.txt b/.changelog/21743.txt new file mode 100644 index 000000000000..c55ba1b8fc6f --- /dev/null +++ b/.changelog/21743.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_security_group: Fix lack of pagination when describing security groups +``` \ No newline at end of file From 7af001619fd03190b341971cf8d4e38e8b51281d Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 12 Nov 2021 00:03:55 +0000 Subject: [PATCH 118/304] Update CHANGELOG.md after v3.65.0 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71ad4c8db68f..1ebee692cee0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,4 @@ +## 3.66.0 (Unreleased) ## 3.65.0 (November 11, 2021) FEATURES: From 00816a3ca7fa44646121e71ef17776aa6a9ad02a Mon Sep 17 00:00:00 
2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Nov 2021 06:21:06 +0000 Subject: [PATCH 119/304] build(deps): bump github.com/aws/aws-sdk-go in /providerlint Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.2 to 1.42.3. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.2...v1.42.3) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- providerlint/go.mod | 2 +- providerlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 39 +++++++++++++++++++ .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- providerlint/vendor/modules.txt | 2 +- 5 files changed, 44 insertions(+), 5 deletions(-) diff --git a/providerlint/go.mod b/providerlint/go.mod index d06f1c766fc9..015a28d4dd95 100644 --- a/providerlint/go.mod +++ b/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/providerlint go 1.16 require ( - github.com/aws/aws-sdk-go v1.42.2 + github.com/aws/aws-sdk-go v1.42.3 github.com/bflad/tfproviderlint v0.27.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.8.0 golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb diff --git a/providerlint/go.sum b/providerlint/go.sum index cf87bcc328ad..f6851cc3a27b 100644 --- a/providerlint/go.sum +++ b/providerlint/go.sum @@ -70,8 +70,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.42.2 h1:SXA+B3DT4N3+wJw5X4Jz9/PazkQZQ7k1nXLGZRdFbO4= -github.com/aws/aws-sdk-go v1.42.2/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.42.3 h1:lBKr3tQ06m1uykiychMNKLK1bRfOzaIEQpsI/S3QiNc= +github.com/aws/aws-sdk-go v1.42.3/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.27.1 h1:sYlc6R8cQ0NtaCCA7Oh1ld8xfn0oiwn6mm4unooi2fo= diff --git a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index bc91d2753181..ea12126a7110 100644 --- a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -19383,6 +19383,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19398,6 +19404,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: 
"fips-us-east-1", }: endpoint{ @@ -24429,6 +24444,14 @@ var awsusgovPartition = partition{ }, }, "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-west-1", @@ -25238,6 +25261,14 @@ var awsusgovPartition = partition{ }, }, "rds": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "rds.us-gov-east-1", @@ -25492,6 +25523,14 @@ var awsusgovPartition = partition{ }, }, "runtime.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-west-1", diff --git a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index 086452c757c4..27a961c1a9c6 100644 --- a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.42.2" +const SDKVersion = "1.42.3" diff --git a/providerlint/vendor/modules.txt b/providerlint/vendor/modules.txt index f605d9e3c7f8..4ffb6327e4c4 100644 --- a/providerlint/vendor/modules.txt +++ b/providerlint/vendor/modules.txt @@ -14,7 +14,7 @@ github.com/agext/levenshtein github.com/apparentlymart/go-textseg/v12/textseg # github.com/apparentlymart/go-textseg/v13 v13.0.0 github.com/apparentlymart/go-textseg/v13/textseg -# github.com/aws/aws-sdk-go v1.42.2 +# github.com/aws/aws-sdk-go v1.42.3 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn From bdfcecb189bd0966d739cb89381aed936ff7c639 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 09:29:59 -0500 Subject: [PATCH 120/304] r/aws_accessanalyzer_analyzer: Add test sweeper. 
--- internal/service/accessanalyzer/analyzer.go | 11 ++-- internal/service/accessanalyzer/sweep.go | 65 +++++++++++++++++++++ internal/sweep/sweep_test.go | 1 + 3 files changed, 71 insertions(+), 6 deletions(-) create mode 100644 internal/service/accessanalyzer/sweep.go diff --git a/internal/service/accessanalyzer/analyzer.go b/internal/service/accessanalyzer/analyzer.go index 302a1cf9f9f8..24bb433614d9 100644 --- a/internal/service/accessanalyzer/analyzer.go +++ b/internal/service/accessanalyzer/analyzer.go @@ -169,19 +169,18 @@ func resourceAnalyzerUpdate(d *schema.ResourceData, meta interface{}) error { func resourceAnalyzerDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).AccessAnalyzerConn - input := &accessanalyzer.DeleteAnalyzerInput{ + log.Printf("[DEBUG] Deleting Access Analyzer Analyzer: (%s)", d.Id()) + _, err := conn.DeleteAnalyzer(&accessanalyzer.DeleteAnalyzerInput{ AnalyzerName: aws.String(d.Id()), ClientToken: aws.String(resource.UniqueId()), - } - - _, err := conn.DeleteAnalyzer(input) + }) - if tfawserr.ErrMessageContains(err, accessanalyzer.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, accessanalyzer.ErrCodeResourceNotFoundException) { return nil } if err != nil { - return fmt.Errorf("error deleting Access Analyzer Analyzer (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting Access Analyzer Analyzer (%s): %w", d.Id(), err) } return nil diff --git a/internal/service/accessanalyzer/sweep.go b/internal/service/accessanalyzer/sweep.go new file mode 100644 index 000000000000..6e0dc190210d --- /dev/null +++ b/internal/service/accessanalyzer/sweep.go @@ -0,0 +1,65 @@ +//go:build sweep +// +build sweep + +package accessanalyzer + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/accessanalyzer" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" +) + +func init() { + resource.AddTestSweepers("aws_accessanalyzer_analyzer", &resource.Sweeper{ + Name: "aws_accessanalyzer_analyzer", + F: sweepAnalyzers, + }) +} + +func sweepAnalyzers(region string) error { + client, err := sweep.SharedRegionalSweepClient(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*conns.AWSClient).AccessAnalyzerConn + input := &accessanalyzer.ListAnalyzersInput{} + sweepResources := make([]*sweep.SweepResource, 0) + + err = conn.ListAnalyzersPages(input, func(page *accessanalyzer.ListAnalyzersOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, analyzer := range page.Analyzers { + r := ResourceAnalyzer() + d := r.Data(nil) + d.SetId(aws.StringValue(analyzer.Name)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + + return !lastPage + }) + + if sweep.SkipSweepError(err) { + log.Printf("[WARN] Skipping Access Analyzer Analyzer sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Access Analyzer Analyzers (%s): %w", region, err) + } + + err = sweep.SweepOrchestrator(sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping Access Analyzer Analyzers (%s): %w", region, err) + } + + return nil +} diff --git a/internal/sweep/sweep_test.go b/internal/sweep/sweep_test.go index 77aedce7fbcb..b41ebdce0be7 100644 --- a/internal/sweep/sweep_test.go +++ b/internal/sweep/sweep_test.go 
@@ -7,6 +7,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + _ "github.com/hashicorp/terraform-provider-aws/internal/service/accessanalyzer" _ "github.com/hashicorp/terraform-provider-aws/internal/service/acm" _ "github.com/hashicorp/terraform-provider-aws/internal/service/acmpca" _ "github.com/hashicorp/terraform-provider-aws/internal/service/amplify" From 3d3d46596cdf8a58d3d983a7c0b659afb9d49bc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Nov 2021 16:05:23 +0000 Subject: [PATCH 121/304] build(deps): bump github.com/aws/aws-sdk-go from 1.42.2 to 1.42.3 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.2 to 1.42.3. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.2...v1.42.3) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2ae439d92028..074a5569d67c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect - github.com/aws/aws-sdk-go v1.42.2 + github.com/aws/aws-sdk-go v1.42.3 github.com/beevik/etree v1.1.0 github.com/evanphx/json-patch v0.5.2 // indirect github.com/fatih/color v1.9.0 // indirect diff --git a/go.sum b/go.sum index 8a835953b617..1e2f189ec055 100644 --- a/go.sum +++ b/go.sum @@ -66,8 +66,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.42.2 h1:SXA+B3DT4N3+wJw5X4Jz9/PazkQZQ7k1nXLGZRdFbO4= -github.com/aws/aws-sdk-go v1.42.2/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.42.3 h1:lBKr3tQ06m1uykiychMNKLK1bRfOzaIEQpsI/S3QiNc= +github.com/aws/aws-sdk-go v1.42.3/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= From 05aeb2630de8e00aced6cf2fcd713e55894edfc0 Mon Sep 17 00:00:00 2001 From: Farhan Angullia Date: Tue, 19 Oct 2021 01:39:58 +0800 Subject: [PATCH 122/304] added resource schema for s3_multi_region_access_point and s3_multi_region_access_point_policy --- internal/provider/provider.go | 2 + .../s3control/multi_region_access_point.go | 380 ++++++++++++++++++ .../multi_region_access_point_policy.go | 205 ++++++++++ 3 files changed, 587 insertions(+) create mode 100644 internal/service/s3control/multi_region_access_point.go create mode 100644 internal/service/s3control/multi_region_access_point_policy.go diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 87db2d3e31bc..7a991c8989cd 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1483,6 +1483,8 @@ func Provider() 
*schema.Provider { "aws_s3control_bucket": s3control.ResourceBucket(), "aws_s3control_bucket_lifecycle_configuration": s3control.ResourceBucketLifecycleConfiguration(), "aws_s3control_bucket_policy": s3control.ResourceBucketPolicy(), + "aws_s3_multi_region_access_point": s3control.ResourceMultiRegionAccessPoint(), + "aws_s3_multi_region_access_point_policy": s3control.ResourceMultiRegionAccessPointPolicy(), "aws_s3outposts_endpoint": s3outposts.ResourceEndpoint(), diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go new file mode 100644 index 000000000000..873c1e83ca13 --- /dev/null +++ b/internal/service/s3control/multi_region_access_point.go @@ -0,0 +1,380 @@ +package s3control + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func ResourceMultiRegionAccessPoint() *schema.Resource { + return &schema.Resource{ + Create: resourceMultiRegionAccessPointCreate, + Read: resourceMultiRegionAccessPointRead, + Delete: resourceMultiRegionAccessPointDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(15 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidAccountID, + }, + "alias": { + Type: schema.TypeString, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "details": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateS3MultiRegionAccessPointName, + }, + "public_access_block": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MinItems: 0, + MaxItems: 1, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "block_public_acls": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + "block_public_policy": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + "ignore_public_acls": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + "restrict_public_buckets": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + }, + }, + }, + "region": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 20, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 255), + }, + }, + }, + }, + }, + }, + }, + "domain_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + 
Computed: true, + }, + }, + } +} + +func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface{}) error { + conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + + if err != nil { + return fmt.Errorf("Error getting S3Control Client: %s", err) + } + + accountId := meta.(*conns.AWSClient).AccountID + if v, ok := d.GetOk("account_id"); ok { + accountId = v.(string) + } + + input := &s3control.CreateMultiRegionAccessPointInput{ + AccountId: aws.String(accountId), + Details: expandMultiRegionAccessPointDetails(d.Get("details").([]interface{})[0].(map[string]interface{})), + } + + name := aws.StringValue(input.Details.Name) + log.Printf("[DEBUG] Creating S3 Multi-Region Access Point: %s", input) + output, err := conn.CreateMultiRegionAccessPoint(input) + + if err != nil { + return fmt.Errorf("error creating S3 Control Multi-Region Access Point (%s): %w", name, err) + } + + if output == nil { + return fmt.Errorf("error creating S3 Control Multi-Region Access Point (%s): empty response", name) + } + + requestTokenARN := aws.StringValue(output.RequestTokenARN) + _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) to create: %w", name, err) + } + + d.SetId(fmt.Sprintf("%s:%s", accountId, name)) + + return resourceMultiRegionAccessPointRead(d, meta) +} + +func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{}) error { + conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + + if err != nil { + return fmt.Errorf("Error getting S3Control Client: %s", err) + } + + accountId, name, err := MultiRegionAccessPointParseId(d.Id()) + if err != nil { + return err + } + + output, err := conn.GetMultiRegionAccessPoint(&s3control.GetMultiRegionAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { + log.Printf("[WARN] S3 Multi-Region Access Point (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Multi-Region Access Point (%s): %w", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error reading S3 Multi-Region Access Point (%s): empty response", d.Id()) + } + + d.Set("account_id", accountId) + d.Set("alias", output.AccessPoint.Alias) + d.Set("domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.accesspoint.s3-global", aws.StringValue(output.AccessPoint.Alias)))) + d.Set("status", output.AccessPoint.Status) + + multiRegionAccessPointARN := arn.ARN{ + AccountID: accountId, + Partition: meta.(*conns.AWSClient).Partition, + Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.AccessPoint.Alias)), + Service: "s3", + } + + d.Set("arn", multiRegionAccessPointARN.String()) + + if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointDetails(output.AccessPoint)}); err != nil { + return fmt.Errorf("error setting details: %s", err) + } + + return nil +} + +func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface{}) error { + conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + if err != nil { + return fmt.Errorf("Error getting S3Control Client: %s", err) + } + + accountId, name, err := MultiRegionAccessPointParseId(d.Id()) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting S3 Multi-Region Access Point: %s",
d.Id()) + output, err := conn.DeleteMultiRegionAccessPoint(&s3control.DeleteMultiRegionAccessPointInput{ + AccountId: aws.String(accountId), + Details: &s3control.DeleteMultiRegionAccessPointInput_{ + Name: aws.String(name), + }, + }) + + if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting S3 Multi-Region Access Point (%s): %s", d.Id(), err) + } + + requestTokenARN := aws.StringValue(output.RequestTokenARN) + _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) to delete: %w", d.Id(), err) + } + + return nil +} + +// MultiRegionAccessPointParseId returns the Account ID and Access Point Name (S3) +func MultiRegionAccessPointParseId(id string) (string, string, error) { + parts := strings.SplitN(id, ":", 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format of ID (%s), expected ACCOUNT_ID:NAME", id) + } + + return parts[0], parts[1], nil +} + +func expandMultiRegionAccessPointDetails(tfMap map[string]interface{}) *s3control.CreateMultiRegionAccessPointInput_ { + if tfMap == nil { + return nil + } + + apiObject := &s3control.CreateMultiRegionAccessPointInput_{} + + if v, ok := tfMap["name"].(string); ok { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["public_access_block"].([]interface{}); ok && len(v) > 0 { + apiObject.PublicAccessBlock = expandS3AccessPointPublicAccessBlockConfiguration(v) + } + + if v, ok := tfMap["region"]; ok { + apiObject.Regions = expandMultiRegionAccessPointRegions(v.(*schema.Set).List()) + } + + return apiObject +} + +func expandMultiRegionAccessPointRegions(tfList []interface{}) []*s3control.Region { + regions := make([]*s3control.Region, 0, len(tfList)) + + for _, tfMapRaw := range tfList { + value, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + region := &s3control.Region{ + Bucket: aws.String(value["bucket"].(string)), + } + + regions = append(regions, region) + } + + return regions +} + +func flattenMultiRegionAccessPointDetails(apiObject *s3control.MultiRegionAccessPointReport) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := apiObject.PublicAccessBlock; v != nil { + tfMap["public_access_block"] = flattenS3AccessPointPublicAccessBlockConfiguration(v) + } + + if v := apiObject.Regions; v != nil { + tfMap["region"] = flattenMultiRegionAccessPointRegions(v) + } + + return tfMap +} + +func flattenMultiRegionAccessPointRegions(apiObjects []*s3control.RegionReport) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + if apiObject.Bucket == nil { + continue + } + + m := map[string]interface{}{} + if v := apiObject.Bucket; v != nil { + m["bucket"] = aws.StringValue(v) + } + + tfList = append(tfList, m) + } + + return tfList +} + +func getS3ControlConn(awsClient *conns.AWSClient) (*s3control.S3Control, error) { + if awsClient.S3ControlConn.Config.Region != nil && *awsClient.S3ControlConn.Config.Region == endpoints.UsWest2RegionID { + return awsClient.S3ControlConn, nil + } + + sess, err := session.NewSession(&awsClient.S3ControlConn.Config) + + if err != nil { + 
return nil, fmt.Errorf("error creating AWS S3Control session: %w", err) + } + + // Multi-Region Access Point requires requests to be routed to the us-west-2 endpoint + conn := s3control.New(sess.Copy(&aws.Config{Region: aws.String(endpoints.UsWest2RegionID)})) + + return conn, nil +} diff --git a/internal/service/s3control/multi_region_access_point_policy.go b/internal/service/s3control/multi_region_access_point_policy.go new file mode 100644 index 000000000000..a87237aff182 --- /dev/null +++ b/internal/service/s3control/multi_region_access_point_policy.go @@ -0,0 +1,205 @@ +package s3control + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func ResourceMultiRegionAccessPointPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceMultiRegionAccessPointPolicyCreate, + Read: resourceMultiRegionAccessPointPolicyRead, + Update: resourceMultiRegionAccessPointPolicyUpdate, + Delete: schema.Noop, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Update: schema.DefaultTimeout(15 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidAccountID, + }, + "details": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateS3MultiRegionAccessPointName, + }, + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, + }, + }, + }, + }, + "established": { + Type: schema.TypeString, + Computed: true, + }, + "proposed": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + if err != nil { + return fmt.Errorf("Error getting S3Control Client: %s", err) + } + + accountId := meta.(*conns.AWSClient).AccountID + if v, ok := d.GetOk("account_id"); ok { + accountId = v.(string) + } + + input := &s3control.PutMultiRegionAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Details: expandMultiRegionAccessPointPolicyDetails(d.Get("details").([]interface{})[0].(map[string]interface{})), + } + + name := aws.StringValue(input.Details.Name) + log.Printf("[DEBUG] Creating S3 Multi-Region Access Point policy: %s", d.Id()) + output, err := conn.PutMultiRegionAccessPointPolicy(input) + + if err != nil { + return fmt.Errorf("error creating S3 Multi-Region Access Point (%s) policy: %s", d.Id(), err) + } + + requestTokenARN := aws.StringValue(output.RequestTokenARN) + _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) to be created: %s", d.Id(), err) + } + + 
d.SetId(fmt.Sprintf("%s:%s", accountId, name)) + + return resourceMultiRegionAccessPointPolicyRead(d, meta) +} + +func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + if err != nil { + return fmt.Errorf("Error getting S3Control Client: %s", err) + } + + accountId, name, err := MultiRegionAccessPointParseId(d.Id()) + if err != nil { + return err + } + + policyOutput, err := conn.GetMultiRegionAccessPointPolicy(&s3control.GetMultiRegionAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { + log.Printf("[WARN] S3 Multi-Region Access Point (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Multi-Region Access Point (%s) policy: %s", d.Id(), err) + } + + log.Printf("[DEBUG] S3 Multi-Region Access Point policy output: %s", policyOutput) + + d.Set("account_id", accountId) + d.Set("established", policyOutput.Policy.Established.Policy) + d.Set("proposed", policyOutput.Policy.Proposed.Policy) + d.Set("details", []interface{}{policyDocumentToDetailsMap(aws.String(name), policyOutput.Policy)}) + + return nil +} + +func resourceMultiRegionAccessPointPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + if err != nil { + return fmt.Errorf("Error getting S3Control Client: %s", err) + } + + accountId, _, err := MultiRegionAccessPointParseId(d.Id()) + if err != nil { + return err + } + + if d.HasChange("details") { + log.Printf("[DEBUG] Updating S3 Multi-Region Access Point policy: %s", d.Id()) + output, err := conn.PutMultiRegionAccessPointPolicy(&s3control.PutMultiRegionAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Details: expandMultiRegionAccessPointPolicyDetails(d.Get("details").([]interface{})[0].(map[string]interface{})), + }) + + if err != nil { + return fmt.Errorf("error updating S3 Multi-Region Access Point (%s) policy: %s", d.Id(), err) + } + + requestTokenARN := *output.RequestTokenARN + _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) to update: %s", d.Id(), err) + } + } + + return resourceMultiRegionAccessPointPolicyRead(d, meta) +} + +func expandMultiRegionAccessPointPolicyDetails(tfMap map[string]interface{}) *s3control.PutMultiRegionAccessPointPolicyInput_ { + if tfMap == nil { + return nil + } + + apiObject := &s3control.PutMultiRegionAccessPointPolicyInput_{} + + if v, ok := tfMap["name"].(string); ok { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["policy"].(string); ok { + apiObject.Policy = aws.String(v) + } + + return apiObject +} + +func policyDocumentToDetailsMap(multiRegionAccessPointName *string, policyDocument *s3control.MultiRegionAccessPointPolicyDocument) map[string]interface{} { + details := map[string]interface{}{} + + details["name"] = aws.StringValue(multiRegionAccessPointName) + details["policy"] = aws.StringValue(policyDocument.Proposed.Policy) + + return details +} From b323f49f0ab3d3101454b6f2a49797e764f6d754 Mon Sep 17 00:00:00 2001 From: Farhan Angullia Date: Tue, 19 Oct 2021 01:41:08 +0800 Subject: [PATCH 123/304] added finder, waiter, validator and errs --- internal/service/s3control/errors.go | 5 +-- 
internal/service/s3control/find.go | 42 ++++++++++++++++++++++++++ internal/service/s3control/status.go | 37 +++++++++++++++++++++++ internal/service/s3control/validate.go | 27 +++++++++++++++++ internal/service/s3control/wait.go | 20 ++++++++++++ 5 files changed, 129 insertions(+), 2 deletions(-) create mode 100644 internal/service/s3control/validate.go diff --git a/internal/service/s3control/errors.go b/internal/service/s3control/errors.go index 3d60a9550085..972877fb8263 100644 --- a/internal/service/s3control/errors.go +++ b/internal/service/s3control/errors.go @@ -4,6 +4,7 @@ package s3control // https://docs.aws.amazon.com/sdk-for-go/api/service/s3control/#pkg-constants //nolint:deadcode,varcheck // These constants are missing from the AWS SDK const ( - errCodeNoSuchAccessPoint = "NoSuchAccessPoint" - errCodeNoSuchAccessPointPolicy = "NoSuchAccessPointPolicy" + errCodeNoSuchAccessPoint = "NoSuchAccessPoint" + errCodeNoSuchAccessPointPolicy = "NoSuchAccessPointPolicy" + ErrCodeNoSuchMultiRegionAccessPoint = "NoSuchMultiRegionAccessPoint" ) diff --git a/internal/service/s3control/find.go b/internal/service/s3control/find.go index e8ce1e01b46f..49f38dca03be 100644 --- a/internal/service/s3control/find.go +++ b/internal/service/s3control/find.go @@ -1,6 +1,8 @@ package s3control import ( + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" ) @@ -22,3 +24,43 @@ func findPublicAccessBlockConfiguration(conn *s3control.S3Control, accountID str return output.PublicAccessBlockConfiguration, nil } + +func FindMultiRegionAccessPointByName(conn *s3control.S3Control, accountId string, name string) (*s3control.MultiRegionAccessPointReport, error) { + input := &s3control.GetMultiRegionAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + } + + log.Printf("[DEBUG] Getting S3 Multi-Region Access Point (%s): %s", name, input) + + output, err := conn.GetMultiRegionAccessPoint(input) + + if err != nil { + return nil, err + } + + if output == nil || output.AccessPoint == nil { + return nil, nil + } + + return output.AccessPoint, nil +} + +func FindMultiRegionAccessPointPolicyDocumentByName(conn *s3control.S3Control, accountID string, name string) (*s3control.MultiRegionAccessPointPolicyDocument, error) { + input := &s3control.GetMultiRegionAccessPointPolicyInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + } + + output, err := conn.GetMultiRegionAccessPointPolicy(input) + + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output.Policy, nil +} diff --git a/internal/service/s3control/status.go b/internal/service/s3control/status.go index ca9268f2ac2c..d8f417a0f099 100644 --- a/internal/service/s3control/status.go +++ b/internal/service/s3control/status.go @@ -1,6 +1,8 @@ package s3control import ( + "fmt" + "log" "strconv" "github.com/aws/aws-sdk-go/aws" @@ -8,6 +10,14 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) +const ( + // RequestStatus SUCCEEDED + RequestStatusSucceeded = "SUCCEEDED" + + // RequestStatus FAILED + RequestStatusFailed = "FAILED" +) + // statusPublicAccessBlockConfigurationBlockPublicACLs fetches the PublicAccessBlockConfiguration and its BlockPublicAcls func statusPublicAccessBlockConfigurationBlockPublicACLs(conn *s3control.S3Control, accountID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { @@ -75,3 +85,30 @@ func statusPublicAccessBlockConfigurationRestrictPublicBuckets(conn *s3control.S return 
publicAccessBlockConfiguration, strconv.FormatBool(aws.BoolValue(publicAccessBlockConfiguration.RestrictPublicBuckets)), nil } } + +func statusMultiRegionAccessPointRequest(conn *s3control.S3Control, accountId string, requestTokenArn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &s3control.DescribeMultiRegionAccessPointOperationInput{ + AccountId: aws.String(accountId), + RequestTokenARN: aws.String(requestTokenArn), + } + + log.Printf("[DEBUG] Describing S3 Multi-Region Access Point Operation (%s): %s", requestTokenArn, input) + + output, err := conn.DescribeMultiRegionAccessPointOperation(input) + + if err != nil { + log.Printf("[ERROR] Describing S3 Multi-Region Access Point Operation (%s): %s", requestTokenArn, err) + return nil, "", err + } + + asyncOperation := output.AsyncOperation + + if aws.StringValue(asyncOperation.RequestStatus) == RequestStatusFailed { + errorDetails := asyncOperation.ResponseDetails.ErrorDetails + return nil, RequestStatusFailed, fmt.Errorf("S3 Multi-Region Access Point asynchronous operation failed (%s): %s: %s", requestTokenArn, aws.StringValue(errorDetails.Code), aws.StringValue(errorDetails.Message)) + } + + return asyncOperation, aws.StringValue(asyncOperation.RequestStatus), nil + } +} diff --git a/internal/service/s3control/validate.go b/internal/service/s3control/validate.go new file mode 100644 index 000000000000..b7f0af1d9423 --- /dev/null +++ b/internal/service/s3control/validate.go @@ -0,0 +1,27 @@ +package s3control + +import ( + "fmt" + "regexp" +) + +func validateS3MultiRegionAccessPointName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) < 3 || len(value) > 50 { + errors = append(errors, fmt.Errorf( + "%q cannot be shorter than 3 or longer than 50 characters", k)) + } + if regexp.MustCompile(`_|[A-Z]|\.`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain underscores, uppercase letters, or periods", k)) + } + if regexp.MustCompile(`^-`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot begin with a hyphen", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + return +} diff --git a/internal/service/s3control/wait.go b/internal/service/s3control/wait.go index ae00f69715ff..9b877ac25e47 100644 --- a/internal/service/s3control/wait.go +++ b/internal/service/s3control/wait.go @@ -1,6 +1,7 @@ package s3control import ( + "log" "strconv" "time" @@ -90,3 +91,22 @@ func waitPublicAccessBlockConfigurationRestrictPublicBucketsUpdated(conn *s3cont return nil, err } + +func waitS3MultiRegionAccessPointRequestSucceeded(conn *s3control.S3Control, accountId string, requestTokenArn string, timeout time.Duration) (*s3control.AsyncOperation, error) { + stateConf := &resource.StateChangeConf{ + Target: []string{RequestStatusSucceeded}, + Timeout: timeout, + Refresh: statusMultiRegionAccessPointRequest(conn, accountId, requestTokenArn), + MinTimeout: 5 * time.Second, + Delay: 15 * time.Second, // Wait 15 secs before starting + } + + log.Printf("[DEBUG] Waiting for S3 Multi-Region Access Point request (%s) to succeed", requestTokenArn) + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*s3control.AsyncOperation); ok { + return output, err + } + + return nil, err +} From 0bad5e92f4cd415429d517edb09e6787fa8967b4 Mon Sep 17 00:00:00 2001 From: Farhan Angullia Date: Tue, 19 Oct 2021 01:41:36 +0800 Subject: [PATCH 124/304] added acc tests and sweepers --- internal/acctest/acctest.go | 15 + .../multi_region_access_point_policy_test.go | 262 +++++++++++++++ .../multi_region_access_point_test.go | 301 ++++++++++++++++++ internal/service/s3control/sweep.go | 69 ++++ 4 files changed, 647 insertions(+) create mode 100644 internal/service/s3control/multi_region_access_point_policy_test.go create mode 100644 internal/service/s3control/multi_region_access_point_test.go diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index 34a24835bd51..3d80b6ac7b40 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -375,6 +375,21 @@ func MatchResourceAttrRegionalHostname(resourceName, attributeName, serviceName } } +// MatchResourceAttrGlobalHostname ensures the Terraform state regexp matches a formatted DNS hostname with partition DNS suffix and without region +func MatchResourceAttrGlobalHostname(resourceName, attributeName, serviceName string, hostnamePrefixRegexp *regexp.Regexp) resource.TestCheckFunc { + return func(s *terraform.State) error { + hostnameRegexpPattern := fmt.Sprintf("%s\\.%s\\.%s$", hostnamePrefixRegexp.String(), serviceName, PartitionDNSSuffix()) + + hostnameRegexp, err := regexp.Compile(hostnameRegexpPattern) + + if err != nil { + return fmt.Errorf("Unable to compile hostname regexp (%s): %w", hostnameRegexpPattern, err) + } + + return resource.TestMatchResourceAttr(resourceName, attributeName, hostnameRegexp)(s) + } +} + // CheckResourceAttrGlobalARN ensures the Terraform state exactly matches a formatted ARN without region func CheckResourceAttrGlobalARN(resourceName, attributeName, arnService, arnResource string) resource.TestCheckFunc { return func(s *terraform.State) error { diff --git a/internal/service/s3control/multi_region_access_point_policy_test.go b/internal/service/s3control/multi_region_access_point_policy_test.go new file mode 100644 index 000000000000..25f6212e4e54 --- /dev/null +++
b/internal/service/s3control/multi_region_access_point_policy_test.go @@ -0,0 +1,262 @@ +package s3control_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/service/s3control" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" +) + +func TestAccS3ControlMultiRegionAccessPointPolicy_basic(t *testing.T) { + var v s3control.MultiRegionAccessPointPolicyDocument + resourceName := "aws_s3_multi_region_access_point_policy.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + multiRegionAccessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + // Multi-Region Access Point Policy cannot be deleted once applied. + // Ensure parent resource is destroyed instead. + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, multiRegionAccessPointName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointPolicyExists(resourceName, &v), + acctest.CheckResourceAttrAccountID(resourceName, "account_id"), + resource.TestCheckResourceAttr(resourceName, "details.#", "1"), + resource.TestCheckResourceAttr(resourceName, "details.0.name", multiRegionAccessPointName), + resource.TestCheckResourceAttrSet(resourceName, "details.0.policy"), + resource.TestCheckResourceAttrSet(resourceName, "established"), + resource.TestCheckResourceAttrSet(resourceName, "proposed"), + resource.TestCheckResourceAttrPair(resourceName, "details.0.policy", resourceName, "proposed"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint(t *testing.T) { + var v s3control.MultiRegionAccessPointReport + parentResourceName := "aws_s3_multi_region_access_point.test" + resourceName := "aws_s3_multi_region_access_point_policy.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + // Multi-Region Access Point Policy cannot be deleted once applied. + // Ensure parent resource is destroyed instead. 
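+	// The disappears step below deletes the parent access point out-of-band,
+	// which leaves this policy resource stranded in state; that is why the
+	// step sets ExpectNonEmptyPlan.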
+ CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(parentResourceName, &v), + testAccCheckMultiRegionAccessPointDisappears(acctest.Provider, tfs3control.ResourceMultiRegionAccessPoint(), parentResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3ControlMultiRegionAccessPointPolicy_details_policy(t *testing.T) { + var v1, v2 s3control.MultiRegionAccessPointPolicyDocument + resourceName := "aws_s3_multi_region_access_point_policy.test" + multiRegionAccessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + // Multi-Region Access Point Policy cannot be deleted once applied. + // Ensure parent resource is destroyed instead. + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, multiRegionAccessPointName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointPolicyExists(resourceName, &v1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccMultiRegionAccessPointPolicyConfig_updatedStatement(bucketName, multiRegionAccessPointName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointPolicyExists(resourceName, &v2), + testAccCheckMultiRegionAccessPointPolicyChanged(&v1, &v2), + ), + }, + }, + }) +} + +func TestAccS3ControlMultiRegionAccessPointPolicy_details_name(t *testing.T) { + var v1, v2 s3control.MultiRegionAccessPointPolicyDocument + resourceName := "aws_s3_multi_region_access_point_policy.test" + multiRegionAccessPointName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + multiRegionAccessPointName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + // Multi-Region Access Point Policy cannot be deleted once applied. + // Ensure parent resource is destroyed instead. 
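+	// The two applies below differ only in details.0.name; pointing the
+	// policy at a differently named access point replaces both the access
+	// point and the policy.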
+ CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, multiRegionAccessPointName1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointPolicyExists(resourceName, &v1), + resource.TestCheckResourceAttr(resourceName, "details.0.name", multiRegionAccessPointName1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, multiRegionAccessPointName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointPolicyExists(resourceName, &v2), + resource.TestCheckResourceAttr(resourceName, "details.0.name", multiRegionAccessPointName2), + ), + }, + }, + }) +} + +func testAccCheckMultiRegionAccessPointPolicyExists(n string, m *s3control.MultiRegionAccessPointPolicyDocument) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + + policyDocument, err := tfs3control.FindMultiRegionAccessPointPolicyDocumentByName(conn, accountId, name) + + if err != nil { + return err + } + + if policyDocument != nil { + *m = *policyDocument + return nil + } + + return fmt.Errorf("Multi-Region Access Point Policy not found") + } +} + +func testAccCheckMultiRegionAccessPointPolicyChanged(i, j *s3control.MultiRegionAccessPointPolicyDocument) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.Proposed.Policy) == aws.StringValue(j.Proposed.Policy) { + return fmt.Errorf("S3 Multi-Region Access Point Policy did not change") + } + + return nil + } +} + +func testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, multiRegionAccessPointName string) string { + return acctest.ConfigCompose( + testAccMultiRegionAccessPointConfig_basic(bucketName, multiRegionAccessPointName), + fmt.Sprintf(` +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_s3_multi_region_access_point_policy" "test" { + details { + name = %[1]q + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Sid" : "Test", + "Effect" : "Allow", + "Principal" : { + "AWS" : data.aws_caller_identity.current.account_id + }, + "Action" : "s3:GetObject", + "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3_multi_region_access_point.test.alias}/object/*" + } + ] + }) + } +} +`, multiRegionAccessPointName)) +} + +func testAccMultiRegionAccessPointPolicyConfig_updatedStatement(bucketName, multiRegionAccessPointName string) string { + return acctest.ConfigCompose( + testAccMultiRegionAccessPointConfig_basic(bucketName, multiRegionAccessPointName), + fmt.Sprintf(` +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_s3_multi_region_access_point_policy" "test" { + details { + name = %[1]q + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Sid" : "Test", + "Effect" : "Allow", + "Principal" : { + "AWS" : data.aws_caller_identity.current.account_id + }, + "Action" : "s3:PutObject", + "Resource" : 
"arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3_multi_region_access_point.test.alias}/object/*" + } + ] + }) + } +} +`, multiRegionAccessPointName)) +} diff --git a/internal/service/s3control/multi_region_access_point_test.go b/internal/service/s3control/multi_region_access_point_test.go new file mode 100644 index 000000000000..535219e7fb19 --- /dev/null +++ b/internal/service/s3control/multi_region_access_point_test.go @@ -0,0 +1,301 @@ +package s3control_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" +) + +func TestAccS3ControlMultiRegionAccessPoint_basic(t *testing.T) { + var v s3control.MultiRegionAccessPointReport + resourceName := "aws_s3_multi_region_access_point.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointConfig_basic(bucketName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(resourceName, &v), + acctest.CheckResourceAttrAccountID(resourceName, "account_id"), + resource.TestMatchResourceAttr(resourceName, "alias", regexp.MustCompile(`^[a-z][a-z0-9]*[.]mrap$`)), + acctest.MatchResourceAttrGlobalARN(resourceName, "arn", "s3", regexp.MustCompile(`accesspoint\/[a-z][a-z0-9]*[.]mrap$`)), + acctest.MatchResourceAttrGlobalHostname(resourceName, "domain_name", "accesspoint.s3-global", regexp.MustCompile(`^[a-z][a-z0-9]*[.]mrap`)), + resource.TestCheckResourceAttr(resourceName, "details.#", "1"), + resource.TestCheckResourceAttr(resourceName, "details.0.name", rName), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.#", "1"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.block_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.block_public_policy", "true"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.ignore_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.restrict_public_buckets", "true"), + resource.TestCheckResourceAttr(resourceName, "details.0.region.#", "1"), + resource.TestCheckResourceAttr(resourceName, "details.0.region.0.bucket", bucketName), + resource.TestCheckResourceAttr(resourceName, "status", s3control.MultiRegionAccessPointStatusReady), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccS3ControlMultiRegionAccessPoint_disappears(t *testing.T) { + var v s3control.MultiRegionAccessPointReport + resourceName := "aws_s3_multi_region_access_point.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointConfig_basic(bucketName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(resourceName, &v), + testAccCheckMultiRegionAccessPointDisappears(acctest.Provider, tfs3control.ResourceMultiRegionAccessPoint(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock(t *testing.T) { + var v s3control.MultiRegionAccessPointReport + resourceName := "aws_s3_multi_region_access_point.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointConfig_publicAccessBlock(bucketName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.#", "1"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.block_public_acls", "false"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.block_public_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.ignore_public_acls", "false"), + resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.restrict_public_buckets", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccS3ControlMultiRegionAccessPoint_name(t *testing.T) { + var v1, v2 s3control.MultiRegionAccessPointReport + resourceName := "aws_s3_multi_region_access_point.test" + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointConfig_basic(bucketName, rName1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(resourceName, &v1), + resource.TestCheckResourceAttr(resourceName, "details.0.name", rName1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccMultiRegionAccessPointConfig_basic(bucketName, rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(resourceName, &v2), + testAccCheckMultiRegionAccessPointRecreated(&v1, &v2), + resource.TestCheckResourceAttr(resourceName, "details.0.name", rName2), + ), + }, + }, + }) +} + +func testAccCheckMultiRegionAccessPointDisappears(provider *schema.Provider, resource *schema.Resource, resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + resourceState, ok := s.RootModule().Resources[resourceName] + + if !ok { + return fmt.Errorf("resource not found: %s", resourceName) + } + + if resourceState.Primary.ID == "" { + return fmt.Errorf("No S3 Multi-Region Access Point ID is set") + } + + return acctest.DeleteResource(resource, resource.Data(resourceState.Primary), provider.Meta()) + } +} + +func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_multi_region_access_point" { + continue + } + + accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + resp, err := conn.GetMultiRegionAccessPoint(&s3control.GetMultiRegionAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchMultiRegionAccessPoint) { + continue + } + + if err != nil { + return err + } + + if resp != nil && resp.AccessPoint != nil && fmt.Sprintf("%s:%s", accountId, aws.StringValue(resp.AccessPoint.Name)) == rs.Primary.ID { + return fmt.Errorf("S3 Multi-Region Access Point with ID %v still exists", rs.Primary.ID) + } + } + return nil +} + +func testAccCheckMultiRegionAccessPointExists(n string, m *s3control.MultiRegionAccessPointReport) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + + multiRegionAccessPoint, err := tfs3control.FindMultiRegionAccessPointByName(conn, accountId, name) + + if err != nil { + return err + } + + if multiRegionAccessPoint != nil { + *m = *multiRegionAccessPoint + return nil + } + + return fmt.Errorf("Multi-Region Access Point not found") + } +} + +// Multi-Region Access Point aliases are unique throughout time and aren’t based on the name or configuration of a Multi-Region Access Point. +// If you create a Multi-Region Access Point, and then delete it and create another one with the same name and configuration, the +// second Multi-Region Access Point will have a different alias than the first. 
(https://docs.aws.amazon.com/AmazonS3/latest/userguide/CreatingMultiRegionAccessPoints.html#multi-region-access-point-naming) +func testAccCheckMultiRegionAccessPointRecreated(before, after *s3control.MultiRegionAccessPointReport) resource.TestCheckFunc { + return func(s *terraform.State) error { + if before, after := aws.StringValue(before.Alias), aws.StringValue(after.Alias); before == after { + return fmt.Errorf("S3 Multi-Region Access Point (%s) not recreated", before) + } + + return nil + } +} + +func testAccMultiRegionAccessPointConfig_basic(bucketName, multiRegionAccessPointName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_s3_multi_region_access_point" "test" { + details { + name = %[2]q + + region { + bucket = aws_s3_bucket.test.id + } + } +} +`, bucketName, multiRegionAccessPointName) +} + +func testAccMultiRegionAccessPointConfig_publicAccessBlock(bucketName, multiRegionAccessPointName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_s3_multi_region_access_point" "test" { + details { + name = %[2]q + + public_access_block { + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false + } + + region { + bucket = aws_s3_bucket.test.id + } + } +} +`, bucketName, multiRegionAccessPointName) +} diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index 0dad2aed52b9..db00b2bc8d28 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -21,6 +21,11 @@ func init() { Name: "aws_s3_access_point", F: sweepAccessPoints, }) + + resource.AddTestSweepers("aws_s3_multi_region_access_point", &resource.Sweeper{ + Name: "aws_s3_multi_region_access_point", + F: sweepMultiRegionAccessPoints, + }) } func sweepAccessPoints(region string) error { @@ -78,3 +83,67 @@ return sweeperErrs.ErrorOrNil() } + +func sweepMultiRegionAccessPoints(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + + if client.(*AWSClient).region != endpoints.UsWest2RegionID { + log.Printf("[WARN] Skipping sweep for region: %s", client.(*AWSClient).region) + return nil + } + + accountId := client.(*AWSClient).accountid + conn := client.(*AWSClient).s3controlconn + + input := &s3control.ListMultiRegionAccessPointsInput{ + AccountId: aws.String(accountId), + } + var sweeperErrs *multierror.Error + + err = conn.ListMultiRegionAccessPointsPages(input, func(page *s3control.ListMultiRegionAccessPointsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, multiRegionAccessPoint := range page.AccessPoints { + input := &s3control.DeleteMultiRegionAccessPointInput{ + AccountId: aws.String(accountId), + Details: &s3control.DeleteMultiRegionAccessPointInput_{ + Name: multiRegionAccessPoint.Name, + }, + } + + name := aws.StringValue(multiRegionAccessPoint.Name) + + log.Printf("[INFO] Deleting S3 Multi-Region Access Point: %s", name) + _, err := conn.DeleteMultiRegionAccessPoint(input) + + if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { + continue + } + + if err != nil { + sweeperErr := fmt.Errorf("error deleting S3 Multi-Region Access Point (%s): %w", name, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = 
multierror.Append(sweeperErrs, sweeperErr) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping S3 Multi-Region Access Point sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing S3 Multi-Region Access Points: %w", err) + } + + return sweeperErrs.ErrorOrNil() +} From cf1759457c898f2223c9000865e1204f6e71fcd6 Mon Sep 17 00:00:00 2001 From: Farhan Angullia Date: Tue, 19 Oct 2021 01:41:52 +0800 Subject: [PATCH 125/304] added site docs --- ...s3_multi_region_access_point.html.markdown | 117 ++++++++++++++++++ ...i_region_access_point_policy.html.markdown | 93 ++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 website/docs/r/s3_multi_region_access_point.html.markdown create mode 100644 website/docs/r/s3_multi_region_access_point_policy.html.markdown diff --git a/website/docs/r/s3_multi_region_access_point.html.markdown b/website/docs/r/s3_multi_region_access_point.html.markdown new file mode 100644 index 000000000000..d13028ef155d --- /dev/null +++ b/website/docs/r/s3_multi_region_access_point.html.markdown @@ -0,0 +1,117 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_multi_region_access_point" +description: |- + Provides a resource to manage a Multi-Region S3 Access Point associated with specified buckets. +--- + +# Resource: aws_s3_multi_region_access_point + +Provides a resource to manage a Multi-Region S3 Access Point associated with specified buckets. + +## Example Usage + +### Multiple AWS Buckets in Different Regions + +```terraform +provider "aws" { + region = "us-east-1" + alias = "primary_region" +} + +provider "aws" { + region = "us-west-2" + alias = "secondary_region" +} + +resource "aws_s3_bucket" "foo_bucket" { + provider = aws.primary_region + + bucket = "example-bucket-foo" +} + +resource "aws_s3_bucket" "bar_bucket" { + provider = aws.secondary_region + + bucket = "example-bucket-bar" +} + +resource "aws_s3_multi_region_access_point" "example" { + details { + name = "example" + + region { + bucket = aws_s3_bucket.foo_bucket.id + } + + region { + bucket = aws_s3_bucket.bar_bucket.id + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `account_id` - (Optional) The AWS account ID for the owner of the buckets for which you want to create a Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. +* `details` - (Required) A configuration block containing details about the Multi-Region Access Point. See [Details Configuration Block](#details-configuration) below for more details + +### Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: + +* `create` - (Default `60 minutes`) Used when creating the Multi-Region Access Point. +* `delete` - (Default `15 minutes`) Used when deleting the Multi-Region Access Point. + +### Details Configuration + +The `details` block supports the following: + +* `name` - (Required) The name of the Multi-Region Access Point. +* `public_access_block` - (Optional) Configuration block to manage the `PublicAccessBlock` configuration that you want to apply to this Multi-Region Access Point. You can enable the configuration options in any combination. See [Public Access Block Configuration](#public-access-block-configuration) below for more details. 
+* `region` - (Required) The Region configuration block to specify the bucket associated with the Multi-Region Access Point. See [Region Configuration](#region-configuration) below for more details. + +For more information, see the documentation on [Multi-Region Access Points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiRegionAccessPoints.html). + +### Public Access Block Configuration + +The `public_access_block` block supports the following: + +* `block_public_acls` - (Optional) Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to `true`. Enabling this setting does not affect existing policies or ACLs. When set to `true`, it causes the following behavior: + * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public. + * PUT Object calls fail if the request includes a public ACL. + * PUT Bucket calls fail if the request includes a public ACL. +* `block_public_policy` - (Optional) Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to `true`. Enabling this setting does not affect existing bucket policies. When set to `true`, it causes Amazon S3 to: + * Reject calls to PUT Bucket policy if the specified bucket policy allows public access. +* `ignore_public_acls` - (Optional) Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to `true`. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. When set to `true`, it causes Amazon S3 to: + * Ignore all public ACLs on buckets in this account and any objects that they contain. +* `restrict_public_buckets` - (Optional) Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to `true`. Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. When set to `true`: + * Only the bucket owner and AWS Services can access buckets with public policies. + +### Region Configuration + +The `region` block supports the following: + +* `bucket` - (Required) The name of the associated bucket for the Region. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `alias` - The alias for the Multi-Region Access Point. +* `arn` - Amazon Resource Name (ARN) of the Multi-Region Access Point. +* `domain_name` - The DNS domain name of the S3 Multi-Region Access Point in the format _`alias`_.accesspoint.s3-global.amazonaws.com. For more information, see the documentation on [Multi-Region Access Point Requests](https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiRegionAccessPointRequests.html). +* `id` - The AWS account ID and access point name separated by a colon (`:`). +* `status` - The current status of the Multi-Region Access Point. One of: `READY`, `INCONSISTENT_ACROSS_REGIONS`, `CREATING`, `PARTIALLY_CREATED`, `PARTIALLY_DELETED`, `DELETING`. + +## Import + +Multi-Region Access Points can be imported using the `account_id` and `name` of the Multi-Region Access Point separated by a colon (`:`), e.g. 
+ +``` +$ terraform import aws_s3_multi_region_access_point.example 123456789012:example +``` diff --git a/website/docs/r/s3_multi_region_access_point_policy.html.markdown b/website/docs/r/s3_multi_region_access_point_policy.html.markdown new file mode 100644 index 000000000000..c3c9a3301dd8 --- /dev/null +++ b/website/docs/r/s3_multi_region_access_point_policy.html.markdown @@ -0,0 +1,93 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_multi_region_access_point_policy" +description: |- + Provides a resource to manage a Multi-Region Access Point access control policy. +--- + +# Resource: aws_s3_multi_region_access_point_policy + +Provides a resource to manage a Multi-Region Access Point access control policy. + +## Example Usage + +### Basic Example + +```terraform +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_s3_bucket" "foo_bucket" { + bucket = "example-bucket-foo" +} + +resource "aws_s3_multi_region_access_point" "example" { + details { + name = "example" + + region { + bucket = aws_s3_bucket.foo_bucket.id + } + } +} + +resource "aws_s3_multi_region_access_point_policy" "example" { + details { + name = element(split(":", aws_s3_multi_region_access_point.example.id), 1) + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Sid" : "Example", + "Effect" : "Allow", + "Principal" : { + "AWS" : data.aws_caller_identity.current.account_id + }, + "Action" : ["s3:GetObject", "s3:PutObject"], + "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3_multi_region_access_point.example.alias}/object/*" + } + ] + }) + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `account_id` - (Optional) The AWS account ID for the owner of the Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. +* `details` - (Required) A configuration block containing details about the policy for the Multi-Region Access Point. See [Details Configuration Block](#details-configuration) below for more details + +### Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: + +* `create` - (Default `15 minutes`) Used when creating the Multi-Region Access Point Policy. +* `update` - (Default `15 minutes`) Used when updating the Multi-Region Access Point Policy. + +### Details Configuration + +The `details` block supports the following: + +* `name` - (Required) The name of the Multi-Region Access Point. +* `policy` - (Required) A valid JSON document that specifies the policy that you want to associate with this Multi-Region Access Point. Once applied, the policy can be edited, but not deleted. For more information, see the documentation on [Multi-Region Access Point Permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiRegionAccessPointPermissions.html). + +-> **NOTE:** When you update the `policy`, the update is first listed as the proposed policy. After the update is finished and all Regions have been updated, the proposed policy is listed as the established policy. If both policies have the same version number, the proposed policy is the established policy. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `established` - The last established policy for the Multi-Region Access Point. 
+* `id` - The AWS account ID and access point name separated by a colon (`:`). +* `proposed` - The proposed policy for the Multi-Region Access Point. + +## Import + +Multi-Region Access Point Policies can be imported using the `account_id` and `name` of the Multi-Region Access Point separated by a colon (`:`), e.g. + +``` +$ terraform import aws_s3_multi_region_access_point_policy.example 123456789012:example +``` From da98b32568c65fbf410736f7caba91a2f67c4eb7 Mon Sep 17 00:00:00 2001 From: Farhan Angullia Date: Sat, 23 Oct 2021 14:28:40 +0800 Subject: [PATCH 126/304] fix golint issue --- .../service/s3control/multi_region_access_point.go | 4 ++-- .../s3control/multi_region_access_point_policy.go | 4 ++-- internal/service/s3control/wait.go | 10 +++++++--- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go index 873c1e83ca13..3db447ae3b5f 100644 --- a/internal/service/s3control/multi_region_access_point.go +++ b/internal/service/s3control/multi_region_access_point.go @@ -161,7 +161,7 @@ func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface } requestTokenARN := aws.StringValue(output.RequestTokenARN) - _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) if err != nil { return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) to create: %s", d.Id(), err) @@ -252,7 +252,7 @@ func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface } requestTokenARN := aws.StringValue(output.RequestTokenARN) - _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutDelete)) + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutDelete)) if err != nil { return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) to delete: %w", d.Id(), err) diff --git a/internal/service/s3control/multi_region_access_point_policy.go b/internal/service/s3control/multi_region_access_point_policy.go index a87237aff182..d146c4524c28 100644 --- a/internal/service/s3control/multi_region_access_point_policy.go +++ b/internal/service/s3control/multi_region_access_point_policy.go @@ -97,7 +97,7 @@ func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta int } requestTokenARN := aws.StringValue(output.RequestTokenARN) - _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) if err != nil { return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) to be created: %s", d.Id(), err) @@ -167,7 +167,7 @@ func resourceMultiRegionAccessPointPolicyUpdate(d *schema.ResourceData, meta int } requestTokenARN := *output.RequestTokenARN - _, err = waitS3MultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutUpdate)) + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutUpdate)) if err != nil { return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) to update: %s", d.Id(), err) diff --git 
a/internal/service/s3control/wait.go b/internal/service/s3control/wait.go index 9b877ac25e47..da23615c5787 100644 --- a/internal/service/s3control/wait.go +++ b/internal/service/s3control/wait.go @@ -18,6 +18,10 @@ const ( // Maximum amount of time to wait for S3control changes to propagate propagationTimeout = 1 * time.Minute + + multiRegionAccessPointRequestSucceededMinTimeout = 5 * time.Second + + multiRegionAccessPointRequestSucceededDelay = 15 * time.Second ) func waitPublicAccessBlockConfigurationBlockPublicACLsUpdated(conn *s3control.S3Control, accountID string, expectedValue bool) (*s3control.PublicAccessBlockConfiguration, error) { @@ -92,13 +96,13 @@ func waitPublicAccessBlockConfigurationRestrictPublicBucketsUpdated(conn *s3cont return nil, err } -func waitS3MultiRegionAccessPointRequestSucceeded(conn *s3control.S3Control, accountId string, requestTokenArn string, timeout time.Duration) (*s3control.AsyncOperation, error) { +func waitMultiRegionAccessPointRequestSucceeded(conn *s3control.S3Control, accountId string, requestTokenArn string, timeout time.Duration) (*s3control.AsyncOperation, error) { //nolint:unparam stateConf := &resource.StateChangeConf{ Target: []string{RequestStatusSucceeded}, Timeout: timeout, Refresh: statusMultiRegionAccessPointRequest(conn, accountId, requestTokenArn), - MinTimeout: 5 * time.Second, - Delay: 15 * time.Second, // Wait 15 secs before starting + MinTimeout: multiRegionAccessPointRequestSucceededMinTimeout, + Delay: multiRegionAccessPointRequestSucceededDelay, // Wait 15 secs before starting } log.Printf("[DEBUG] Waiting for S3 Multi-Region Access Point request (%s) to succeed", requestTokenArn) From 38d37e29b4755d1b3b43b61a957b3c87d7b7815f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 11:25:57 -0500 Subject: [PATCH 127/304] Rename resources: 'aws_s3_multi_region_access_point' -> 'aws_s3control_multi_region_access_point' and 'aws_s3_multi_region_access_point_policy' -> 'aws_s3control_multi_region_access_point_policy'. 
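Since the resource types themselves are renamed, any state written under the old names (for example, from a pre-rename build of this branch) has to be moved by hand. A hypothetical migration, assuming a resource address of `example`:

```
$ terraform state mv aws_s3_multi_region_access_point.example aws_s3control_multi_region_access_point.example
$ terraform state mv aws_s3_multi_region_access_point_policy.example aws_s3control_multi_region_access_point_policy.example
```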
--- internal/provider/provider.go | 14 +++++++------- .../multi_region_access_point_policy_test.go | 18 +++++++++--------- .../multi_region_access_point_test.go | 14 +++++++------- ...ol_multi_region_access_point.html.markdown} | 12 ++++++------ ...i_region_access_point_policy.html.markdown} | 18 +++++++++--------- 5 files changed, 38 insertions(+), 38 deletions(-) rename website/docs/r/{s3_multi_region_access_point.html.markdown => s3control_multi_region_access_point.html.markdown} (91%) rename website/docs/r/{s3_multi_region_access_point_policy.html.markdown => s3control_multi_region_access_point_policy.html.markdown} (81%) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 7a991c8989cd..3999b1ea157f 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1478,13 +1478,13 @@ func Provider() *schema.Provider { "aws_s3_bucket_public_access_block": s3.ResourceBucketPublicAccessBlock(), "aws_s3_object_copy": s3.ResourceObjectCopy(), - "aws_s3_access_point": s3control.ResourceAccessPoint(), - "aws_s3_account_public_access_block": s3control.ResourceAccountPublicAccessBlock(), - "aws_s3control_bucket": s3control.ResourceBucket(), - "aws_s3control_bucket_lifecycle_configuration": s3control.ResourceBucketLifecycleConfiguration(), - "aws_s3control_bucket_policy": s3control.ResourceBucketPolicy(), - "aws_s3_multi_region_access_point": s3control.ResourceMultiRegionAccessPoint(), - "aws_s3_multi_region_access_point_policy": s3control.ResourceMultiRegionAccessPointPolicy(), + "aws_s3_access_point": s3control.ResourceAccessPoint(), + "aws_s3_account_public_access_block": s3control.ResourceAccountPublicAccessBlock(), + "aws_s3control_bucket": s3control.ResourceBucket(), + "aws_s3control_bucket_lifecycle_configuration": s3control.ResourceBucketLifecycleConfiguration(), + "aws_s3control_bucket_policy": s3control.ResourceBucketPolicy(), + "aws_s3control_multi_region_access_point": s3control.ResourceMultiRegionAccessPoint(), + "aws_s3control_multi_region_access_point_policy": s3control.ResourceMultiRegionAccessPointPolicy(), "aws_s3outposts_endpoint": s3outposts.ResourceEndpoint(), diff --git a/internal/service/s3control/multi_region_access_point_policy_test.go b/internal/service/s3control/multi_region_access_point_policy_test.go index 25f6212e4e54..1b3d086a2065 100644 --- a/internal/service/s3control/multi_region_access_point_policy_test.go +++ b/internal/service/s3control/multi_region_access_point_policy_test.go @@ -17,7 +17,7 @@ import ( func TestAccS3ControlMultiRegionAccessPointPolicy_basic(t *testing.T) { var v s3control.MultiRegionAccessPointPolicyDocument - resourceName := "aws_s3_multi_region_access_point_policy.test" + resourceName := "aws_s3control_multi_region_access_point_policy.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) multiRegionAccessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -56,8 +56,8 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_basic(t *testing.T) { func TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint(t *testing.T) { var v s3control.MultiRegionAccessPointReport - parentResourceName := "aws_s3_multi_region_access_point.test" - resourceName := "aws_s3_multi_region_access_point_policy.test" + parentResourceName := "aws_s3control_multi_region_access_point.test" + resourceName := "aws_s3control_multi_region_access_point_policy.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -86,7 +86,7 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPo func TestAccS3ControlMultiRegionAccessPointPolicy_details_policy(t *testing.T) { var v1, v2 s3control.MultiRegionAccessPointPolicyDocument - resourceName := "aws_s3_multi_region_access_point_policy.test" + resourceName := "aws_s3control_multi_region_access_point_policy.test" multiRegionAccessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -125,7 +125,7 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_details_policy(t *testing.T) { func TestAccS3ControlMultiRegionAccessPointPolicy_details_name(t *testing.T) { var v1, v2 s3control.MultiRegionAccessPointPolicyDocument - resourceName := "aws_s3_multi_region_access_point_policy.test" + resourceName := "aws_s3control_multi_region_access_point_policy.test" multiRegionAccessPointName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) multiRegionAccessPointName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -210,7 +210,7 @@ func testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, multiRegionAcce data "aws_caller_identity" "current" {} data "aws_partition" "current" {} -resource "aws_s3_multi_region_access_point_policy" "test" { +resource "aws_s3control_multi_region_access_point_policy" "test" { details { name = %[1]q policy = jsonencode({ @@ -223,7 +223,7 @@ resource "aws_s3_multi_region_access_point_policy" "test" { "AWS" : data.aws_caller_identity.current.account_id }, "Action" : "s3:GetObject", - "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3_multi_region_access_point.test.alias}/object/*" + "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3control_multi_region_access_point.test.alias}/object/*" } ] }) @@ -239,7 +239,7 @@ func testAccMultiRegionAccessPointPolicyConfig_updatedStatement(bucketName, mult data "aws_caller_identity" "current" {} data "aws_partition" "current" {} -resource "aws_s3_multi_region_access_point_policy" "test" { +resource "aws_s3control_multi_region_access_point_policy" "test" { details { name = %[1]q policy = jsonencode({ @@ -252,7 +252,7 @@ resource "aws_s3_multi_region_access_point_policy" "test" { "AWS" : data.aws_caller_identity.current.account_id }, "Action" : "s3:PutObject", - "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3_multi_region_access_point.test.alias}/object/*" + "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3control_multi_region_access_point.test.alias}/object/*" } ] }) diff --git a/internal/service/s3control/multi_region_access_point_test.go b/internal/service/s3control/multi_region_access_point_test.go index 535219e7fb19..240b513dd2df 100644 --- a/internal/service/s3control/multi_region_access_point_test.go +++ b/internal/service/s3control/multi_region_access_point_test.go @@ -20,7 +20,7 @@ import ( func TestAccS3ControlMultiRegionAccessPoint_basic(t *testing.T) { var v s3control.MultiRegionAccessPointReport - resourceName := "aws_s3_multi_region_access_point.test" + resourceName := "aws_s3control_multi_region_access_point.test" bucketName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -64,7 +64,7 @@ func TestAccS3ControlMultiRegionAccessPoint_basic(t *testing.T) { func TestAccS3ControlMultiRegionAccessPoint_disappears(t *testing.T) { var v s3control.MultiRegionAccessPointReport - resourceName := "aws_s3_multi_region_access_point.test" + resourceName := "aws_s3control_multi_region_access_point.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -91,7 +91,7 @@ func TestAccS3ControlMultiRegionAccessPoint_disappears(t *testing.T) { func TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock(t *testing.T) { var v s3control.MultiRegionAccessPointReport - resourceName := "aws_s3_multi_region_access_point.test" + resourceName := "aws_s3control_multi_region_access_point.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -126,7 +126,7 @@ func TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock(t *testing.T) { func TestAccS3ControlMultiRegionAccessPoint_name(t *testing.T) { var v1, v2 s3control.MultiRegionAccessPointReport - resourceName := "aws_s3_multi_region_access_point.test" + resourceName := "aws_s3control_multi_region_access_point.test" rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -184,7 +184,7 @@ func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_multi_region_access_point" { + if rs.Type != "aws_s3control_multi_region_access_point" { continue } @@ -262,7 +262,7 @@ resource "aws_s3_bucket" "test" { force_destroy = true } -resource "aws_s3_multi_region_access_point" "test" { +resource "aws_s3control_multi_region_access_point" "test" { details { name = %[2]q @@ -281,7 +281,7 @@ resource "aws_s3_bucket" "test" { force_destroy = true } -resource "aws_s3_multi_region_access_point" "test" { +resource "aws_s3control_multi_region_access_point" "test" { details { name = %[2]q diff --git a/website/docs/r/s3_multi_region_access_point.html.markdown b/website/docs/r/s3control_multi_region_access_point.html.markdown similarity index 91% rename from website/docs/r/s3_multi_region_access_point.html.markdown rename to website/docs/r/s3control_multi_region_access_point.html.markdown index d13028ef155d..8414e2ccb398 100644 --- a/website/docs/r/s3_multi_region_access_point.html.markdown +++ b/website/docs/r/s3control_multi_region_access_point.html.markdown @@ -1,14 +1,14 @@ --- subcategory: "S3" layout: "aws" -page_title: "AWS: aws_s3_multi_region_access_point" +page_title: "AWS: aws_s3control_multi_region_access_point" description: |- - Provides a resource to manage a Multi-Region S3 Access Point associated with specified buckets. + Provides a resource to manage an S3 Multi-Region Access Point associated with specified buckets. --- -# Resource: aws_s3_multi_region_access_point +# Resource: aws_s3control_multi_region_access_point -Provides a resource to manage a Multi-Region S3 Access Point associated with specified buckets. +Provides a resource to manage an S3 Multi-Region Access Point associated with specified buckets. 
## Example Usage @@ -37,7 +37,7 @@ resource "aws_s3_bucket" "bar_bucket" { bucket = "example-bucket-bar" } -resource "aws_s3_multi_region_access_point" "example" { +resource "aws_s3control_multi_region_access_point" "example" { details { name = "example" @@ -113,5 +113,5 @@ In addition to all arguments above, the following attributes are exported: Multi-Region Access Points can be imported using the `account_id` and `name` of the Multi-Region Access Point separated by a colon (`:`), e.g. ``` -$ terraform import aws_s3_multi_region_access_point.example 123456789012:example +$ terraform import aws_s3control_multi_region_access_point.example 123456789012:example ``` diff --git a/website/docs/r/s3_multi_region_access_point_policy.html.markdown b/website/docs/r/s3control_multi_region_access_point_policy.html.markdown similarity index 81% rename from website/docs/r/s3_multi_region_access_point_policy.html.markdown rename to website/docs/r/s3control_multi_region_access_point_policy.html.markdown index c3c9a3301dd8..0fea0950cd8f 100644 --- a/website/docs/r/s3_multi_region_access_point_policy.html.markdown +++ b/website/docs/r/s3control_multi_region_access_point_policy.html.markdown @@ -1,14 +1,14 @@ --- subcategory: "S3" layout: "aws" -page_title: "AWS: aws_s3_multi_region_access_point_policy" +page_title: "AWS: aws_s3control_multi_region_access_point_policy" description: |- - Provides a resource to manage a Multi-Region Access Point access control policy. + Provides a resource to manage an S3 Multi-Region Access Point access control policy. --- -# Resource: aws_s3_multi_region_access_point_policy +# Resource: aws_s3control_multi_region_access_point_policy -Provides a resource to manage a Multi-Region Access Point access control policy. +Provides a resource to manage an S3 Multi-Region Access Point access control policy. ## Example Usage @@ -22,7 +22,7 @@ resource "aws_s3_bucket" "foo_bucket" { bucket = "example-bucket-foo" } -resource "aws_s3_multi_region_access_point" "example" { +resource "aws_s3control_multi_region_access_point" "example" { details { name = "example" @@ -32,9 +32,9 @@ resource "aws_s3_multi_region_access_point" "example" { } } -resource "aws_s3_multi_region_access_point_policy" "example" { +resource "aws_s3control_multi_region_access_point_policy" "example" { details { - name = element(split(":", aws_s3_multi_region_access_point.example.id), 1) + name = element(split(":", aws_s3control_multi_region_access_point.example.id), 1) policy = jsonencode({ "Version" : "2012-10-17", "Statement" : [ @@ -45,7 +45,7 @@ resource "aws_s3_multi_region_access_point_policy" "example" { "AWS" : data.aws_caller_identity.current.account_id }, "Action" : ["s3:GetObject", "s3:PutObject"], - "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3_multi_region_access_point.example.alias}/object/*" + "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3control_multi_region_access_point.example.alias}/object/*" } ] }) @@ -89,5 +89,5 @@ In addition to all arguments above, the following attributes are exported: Multi-Region Access Point Policies can be imported using the `account_id` and `name` of the Multi-Region Access Point separated by a colon (`:`), e.g. 
``` -$ terraform import aws_s3_multi_region_access_point_policy.example 123456789012:example +$ terraform import aws_s3control_multi_region_access_point_policy.example 123456789012:example ``` From 06cace6eeb521c6d3bccbbe7b930132af60de212 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 11:29:38 -0500 Subject: [PATCH 128/304] Tweak CHANGELOG entry. --- .changelog/21702.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/21702.txt b/.changelog/21702.txt index 5d88af078c39..068e9eefc7fd 100644 --- a/.changelog/21702.txt +++ b/.changelog/21702.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_emr_cluster: Add support for `auto_termination_policy`. +resource/aws_emr_cluster: Add `auto_termination_policy` argument ``` \ No newline at end of file From 3b16893956e7f549fb4c11da073bb47ceae9c40a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 11:59:09 -0500 Subject: [PATCH 129/304] Add CHANGELOG entries. --- .changelog/21060.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/21060.txt diff --git a/.changelog/21060.txt b/.changelog/21060.txt new file mode 100644 index 000000000000..6d2ed8db6efd --- /dev/null +++ b/.changelog/21060.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_s3control_multi_region_access_point +``` + +```release-note:new-resource +aws_s3control_multi_region_access_point_policy +``` \ No newline at end of file From 2ffb4e7a57153a27f5c18b0964d7d3cb311b7a6f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 11:59:57 -0500 Subject: [PATCH 130/304] Change documentation sub-category to 'S3 Control'. --- .../docs/r/s3control_multi_region_access_point.html.markdown | 2 +- .../r/s3control_multi_region_access_point_policy.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/s3control_multi_region_access_point.html.markdown b/website/docs/r/s3control_multi_region_access_point.html.markdown index 8414e2ccb398..1b19ee3edbcc 100644 --- a/website/docs/r/s3control_multi_region_access_point.html.markdown +++ b/website/docs/r/s3control_multi_region_access_point.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "S3 Control" layout: "aws" page_title: "AWS: aws_s3control_multi_region_access_point" description: |- diff --git a/website/docs/r/s3control_multi_region_access_point_policy.html.markdown b/website/docs/r/s3control_multi_region_access_point_policy.html.markdown index 0fea0950cd8f..eb26b78148a8 100644 --- a/website/docs/r/s3control_multi_region_access_point_policy.html.markdown +++ b/website/docs/r/s3control_multi_region_access_point_policy.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "S3 Control" layout: "aws" page_title: "AWS: aws_s3control_multi_region_access_point_policy" description: |- From 8447efdc5c51af9d93439ab24acc5230d180e2c9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 12:06:05 -0500 Subject: [PATCH 131/304] r/aws_emr_cluster: Modify finders to allow StateChangeReason to be reported on waiter failure. 
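With FindCluster returning the raw cluster, a waiter can report the service-supplied StateChangeReason when a wait fails. A minimal sketch of that waiter-side pattern follows; it is illustrative only, and the function name, pending/target states, and error wrapping are assumptions rather than part of this patch:

```go
// Illustrative sketch: surface the EMR-reported StateChangeReason when a
// cluster wait fails. Assumes statusCluster returns the *emr.Cluster as the
// refresh result, which it does once it delegates to FindCluster.
func waitClusterCreated(conn *emr.EMR, id string, timeout time.Duration) (*emr.Cluster, error) {
	stateConf := &resource.StateChangeConf{
		Pending: []string{emr.ClusterStateBootstrapping, emr.ClusterStateStarting},
		Target:  []string{emr.ClusterStateRunning, emr.ClusterStateWaiting},
		Refresh: statusCluster(conn, id),
		Timeout: timeout,
	}

	outputRaw, err := stateConf.WaitForState()

	if output, ok := outputRaw.(*emr.Cluster); ok {
		// On failure, fold the reason reported by the service into the error.
		if reason := output.Status.StateChangeReason; err != nil && reason != nil {
			err = fmt.Errorf("%s: %s: %w", aws.StringValue(reason.Code), aws.StringValue(reason.Message), err)
		}

		return output, err
	}

	return nil, err
}
```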
--- internal/service/emr/errors.go | 5 +++++ internal/service/emr/find.go | 31 +++++++++++++++++++------------ internal/service/emr/status.go | 8 ++++++-- internal/service/emr/wait.go | 9 ++++++++- 4 files changed, 38 insertions(+), 15 deletions(-) create mode 100644 internal/service/emr/errors.go diff --git a/internal/service/emr/errors.go b/internal/service/emr/errors.go new file mode 100644 index 000000000000..5eec97d28dbd --- /dev/null +++ b/internal/service/emr/errors.go @@ -0,0 +1,5 @@ +package emr + +const ( + ErrCodeClusterNotFound = "ClusterNotFound" +) diff --git a/internal/service/emr/find.go b/internal/service/emr/find.go index 1edae0fc4ada..7d0611a7efe2 100644 --- a/internal/service/emr/find.go +++ b/internal/service/emr/find.go @@ -8,14 +8,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindClusterByID(conn *emr.EMR, id string) (*emr.Cluster, error) { - input := &emr.DescribeClusterInput{ - ClusterId: aws.String(id), - } - +func FindCluster(conn *emr.EMR, input *emr.DescribeClusterInput) (*emr.Cluster, error) { output, err := conn.DescribeCluster(input) - if tfawserr.ErrCodeEquals(err, "ClusterNotFound") || tfawserr.ErrMessageContains(err, emr.ErrCodeInvalidRequestException, "is not valid") { + if tfawserr.ErrCodeEquals(err, ErrCodeClusterNotFound) || tfawserr.ErrMessageContains(err, emr.ErrCodeInvalidRequestException, "is not valid") { return nil, &resource.NotFoundError{ LastError: err, LastRequest: input, @@ -26,19 +22,30 @@ func FindClusterByID(conn *emr.EMR, id string) (*emr.Cluster, error) { return nil, err } - if output == nil || output.Cluster == nil { + if output == nil || output.Cluster == nil || output.Cluster.Status == nil { return nil, tfresource.NewEmptyResultError(input) } - status := output.Cluster.Status - state := aws.StringValue(status.State) + return output.Cluster, nil +} + +func FindClusterByID(conn *emr.EMR, id string) (*emr.Cluster, error) { + input := &emr.DescribeClusterInput{ + ClusterId: aws.String(id), + } + + output, err := FindCluster(conn, input) - if state == emr.ClusterStateTerminated || state == emr.ClusterStateTerminatedWithErrors { + if err != nil { + return nil, err + } + + if state := aws.StringValue(output.Status.State); state == emr.ClusterStateTerminated || state == emr.ClusterStateTerminatedWithErrors { return nil, &resource.NotFoundError{ - Message: aws.StringValue(status.StateChangeReason.Message), + Message: state, LastRequest: input, } } - return output.Cluster, nil + return output, nil } diff --git a/internal/service/emr/status.go b/internal/service/emr/status.go index 9bd3658d0424..3f69b175ec5a 100644 --- a/internal/service/emr/status.go +++ b/internal/service/emr/status.go @@ -2,14 +2,18 @@ package emr import ( "github.com/aws/aws-sdk-go/aws" - emr "github.com/aws/aws-sdk-go/service/emr" + "github.com/aws/aws-sdk-go/service/emr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func statusCluster(conn *emr.EMR, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindClusterByID(conn, id) + input := &emr.DescribeClusterInput{ + ClusterId: aws.String(id), + } + + output, err := FindCluster(conn, input) if tfresource.NotFound(err) { return nil, "", nil diff --git a/internal/service/emr/wait.go b/internal/service/emr/wait.go index 0909f0e195cf..43b335728620 100644 --- a/internal/service/emr/wait.go +++ b/internal/service/emr/wait.go @@ -1,10 +1,13 @@ package emr 
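// Note: tfresource.SetLastError attaches a causal error to the waiter's
// resource.TimeoutError or resource.UnexpectedStateError; that is how the
// EMR StateChangeReason below is propagated when waitClusterCreated fails.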
import ( + "fmt" "time" - emr "github.com/aws/aws-sdk-go/service/emr" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/emr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) const ( @@ -32,6 +35,10 @@ func waitClusterCreated(conn *emr.EMR, id string) (*emr.Cluster, error) { outputRaw, err := stateConf.WaitForState() if output, ok := outputRaw.(*emr.Cluster); ok { + if stateChangeReason := output.Status.StateChangeReason; stateChangeReason != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateChangeReason.Code), aws.StringValue(stateChangeReason.Message))) + } + return output, err } From a04986d2c0a2ffe592ec42b8a05a7f25a33e496c Mon Sep 17 00:00:00 2001 From: Ashish Date: Fri, 12 Nov 2021 09:19:46 -0800 Subject: [PATCH 132/304] Direct users to documentation --- .../docs/d/ec2_transit_gateway.html.markdown | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/website/docs/d/ec2_transit_gateway.html.markdown b/website/docs/d/ec2_transit_gateway.html.markdown index 44b106428d8a..fd27295078b7 100644 --- a/website/docs/d/ec2_transit_gateway.html.markdown +++ b/website/docs/d/ec2_transit_gateway.html.markdown @@ -40,25 +40,9 @@ The following arguments are supported: ### filter Argument Reference -* `name` - (Required) Name of the filter. +* `name` - (Required) The name of the field to filter by, as defined by the [underlying AWS API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html). * `values` - (Required) List of one or more values for the filter. -#### Supported values for Filters - -One or more filters are supported. The possible values are: - -* `options.propagation-default-route-table-id` - The ID of the default propagation route table. -* `options.amazon-side-asn` - The private ASN for the Amazon side of a BGP session. -* `options.association-default-route-table-id` - The ID of the default association route table. -* `options.auto-accept-shared-attachments` - Indicates whether there is automatic acceptance of attachment requests (enable | disable ). -* `options.default-route-table-association` - Indicates whether resource attachments are automatically associated with the default association route table (enable | disable ). -* `options.default-route-table-propagation` - Indicates whether resource attachments automatically propagate routes to the default propagation route table (enable | disable ). -* `options.dns-support` - Indicates whether DNS support is enabled (enable | disable ). -* `options.vpn-ecmp-support` - Indicates whether Equal Cost Multipath Protocol support is enabled (enable | disable ). -* `owner-id` - The ID of the Amazon Web Services account that owns the transit gateway. -* `state` - The state of the transit gateway (available | deleted | deleting | modifying | pending ). 
-* `transit-gateway-id` - The ID of the transit gateway - ## Attribute Reference In addition to all arguments above, the following attributes are exported: From 731706577955325077c352fb43e7eaa976824623 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 12 Nov 2021 18:03:58 +0000 Subject: [PATCH 133/304] Update CHANGELOG.md for #21743 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ebee692cee0..99cb713d05eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,9 @@ ## 3.66.0 (Unreleased) + +BUG FIXES: + +* resource/aws_security_group: Fix lack of pagination when describing security groups ([#21743](https://github.com/hashicorp/terraform-provider-aws/issues/21743)) + ## 3.65.0 (November 11, 2021) FEATURES: From 7d5169edd6e02a99fe26a35b6b2ec7cd5ff16ab3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 13:09:08 -0500 Subject: [PATCH 134/304] r/aws_s3control_multi_region_access_point: 'FindMultiRegionAccessPointByName' -> 'FindMultiRegionAccessPointByAccountIDAndName'. r/aws_s3control_multi_region_access_point_policy: 'FindMultiRegionAccessPointPolicyDocumentByName' -> 'FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName'. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlMultiRegionAccessPoint_\|TestAccS3ControlMultiRegionAccessPointPolicy_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlMultiRegionAccessPoint_\|TestAccS3ControlMultiRegionAccessPointPolicy_ -timeout 180m === RUN TestAccS3ControlMultiRegionAccessPointPolicy_basic === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_basic === RUN TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint === RUN TestAccS3ControlMultiRegionAccessPointPolicy_details_policy === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_details_policy === RUN TestAccS3ControlMultiRegionAccessPointPolicy_details_name === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_details_name === RUN TestAccS3ControlMultiRegionAccessPoint_basic === PAUSE TestAccS3ControlMultiRegionAccessPoint_basic === RUN TestAccS3ControlMultiRegionAccessPoint_disappears === PAUSE TestAccS3ControlMultiRegionAccessPoint_disappears === RUN TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock === PAUSE TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock === RUN TestAccS3ControlMultiRegionAccessPoint_name === PAUSE TestAccS3ControlMultiRegionAccessPoint_name === CONT TestAccS3ControlMultiRegionAccessPointPolicy_basic === CONT TestAccS3ControlMultiRegionAccessPoint_disappears === CONT TestAccS3ControlMultiRegionAccessPoint_name === CONT TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock === CONT TestAccS3ControlMultiRegionAccessPointPolicy_details_name === CONT TestAccS3ControlMultiRegionAccessPoint_basic === CONT TestAccS3ControlMultiRegionAccessPointPolicy_details_policy === CONT TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint --- PASS: TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock (700.81s) --- PASS: TestAccS3ControlMultiRegionAccessPoint_basic (701.57s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_basic (702.98s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint (704.67s) --- PASS: 
TestAccS3ControlMultiRegionAccessPoint_disappears (707.16s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_details_policy (832.41s) --- PASS: TestAccS3ControlMultiRegionAccessPoint_name (1068.44s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_details_name (1102.97s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 1106.706s --- internal/service/s3control/errors.go | 2 +- internal/service/s3control/find.go | 33 +++++++++++++------ .../s3control/multi_region_access_point.go | 26 ++++++--------- .../multi_region_access_point_policy.go | 21 +++++------- .../multi_region_access_point_policy_test.go | 15 +++++---- .../multi_region_access_point_test.go | 29 ++++++++-------- 6 files changed, 63 insertions(+), 63 deletions(-) diff --git a/internal/service/s3control/errors.go b/internal/service/s3control/errors.go index 972877fb8263..4ea41c00def2 100644 --- a/internal/service/s3control/errors.go +++ b/internal/service/s3control/errors.go @@ -6,5 +6,5 @@ package s3control const ( errCodeNoSuchAccessPoint = "NoSuchAccessPoint" errCodeNoSuchAccessPointPolicy = "NoSuchAccessPointPolicy" - ErrCodeNoSuchMultiRegionAccessPoint = "NoSuchMultiRegionAccessPoint" + errCodeNoSuchMultiRegionAccessPoint = "NoSuchMultiRegionAccessPoint" ) diff --git a/internal/service/s3control/find.go b/internal/service/s3control/find.go index 49f38dca03be..b7311212e3d8 100644 --- a/internal/service/s3control/find.go +++ b/internal/service/s3control/find.go @@ -1,10 +1,11 @@ package s3control import ( - "log" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func findPublicAccessBlockConfiguration(conn *s3control.S3Control, accountID string) (*s3control.PublicAccessBlockConfiguration, error) { @@ -25,28 +26,33 @@ func findPublicAccessBlockConfiguration(conn *s3control.S3Control, accountID str return output.PublicAccessBlockConfiguration, nil } -func FindMultiRegionAccessPointByName(conn *s3control.S3Control, accountId string, name string) (*s3control.MultiRegionAccessPointReport, error) { +func FindMultiRegionAccessPointByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (*s3control.MultiRegionAccessPointReport, error) { input := &s3control.GetMultiRegionAccessPointInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), Name: aws.String(name), } - log.Printf("[DEBUG] Getting S3 Multi-Region Access Point (%s): %s", name, input) - output, err := conn.GetMultiRegionAccessPoint(input) + if tfawserr.ErrCodeEquals(err, errCodeNoSuchMultiRegionAccessPoint) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { return nil, err } if output == nil || output.AccessPoint == nil { - return nil, nil + return nil, tfresource.NewEmptyResultError(input) } return output.AccessPoint, nil } -func FindMultiRegionAccessPointPolicyDocumentByName(conn *s3control.S3Control, accountID string, name string) (*s3control.MultiRegionAccessPointPolicyDocument, error) { +func FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (*s3control.MultiRegionAccessPointPolicyDocument, error) { input := &s3control.GetMultiRegionAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), @@ -54,12 +60,19 @@ func 
FindMultiRegionAccessPointPolicyDocumentByName(conn *s3control.S3Control, a output, err := conn.GetMultiRegionAccessPointPolicy(input) + if tfawserr.ErrCodeEquals(err, errCodeNoSuchMultiRegionAccessPoint) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { return nil, err } - if output == nil { - return nil, nil + if output == nil || output.Policy == nil { + return nil, tfresource.NewEmptyResultError(input) } return output.Policy, nil diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go index 3db447ae3b5f..66e417df0d50 100644 --- a/internal/service/s3control/multi_region_access_point.go +++ b/internal/service/s3control/multi_region_access_point.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -184,12 +185,9 @@ func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{} return err } - output, err := conn.GetMultiRegionAccessPoint(&s3control.GetMultiRegionAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + accessPoint, err := FindMultiRegionAccessPointByAccountIDAndName(conn, accountId, name) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Multi-Region Access Point (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -199,25 +197,21 @@ func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error reading S3 Multi-Region Access Point (%s): %w", d.Id(), err) } - if output == nil { - return fmt.Errorf("error reading S3 Multi-Region Access Point (%s): empty response", d.Id()) - } - d.Set("account_id", accountId) - d.Set("alias", output.AccessPoint.Alias) - d.Set("domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.accesspoint.s3-global", aws.StringValue(output.AccessPoint.Alias)))) - d.Set("status", output.AccessPoint.Status) + d.Set("alias", accessPoint.Alias) + d.Set("domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.accesspoint.s3-global", aws.StringValue(accessPoint.Alias)))) + d.Set("status", accessPoint.Status) multiRegionAccessPointARN := arn.ARN{ AccountID: accountId, Partition: meta.(*conns.AWSClient).Partition, - Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.AccessPoint.Alias)), + Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(accessPoint.Alias)), Service: "s3", } d.Set("arn", multiRegionAccessPointARN.String()) - if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointDetails(output.AccessPoint)}); err != nil { + if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointDetails(accessPoint)}); err != nil { return fmt.Errorf("error setting details: %s", err) } @@ -243,12 +237,12 @@ func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface }, }) - if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchMultiRegionAccessPoint) { return nil } if err != nil { - return fmt.Errorf("error deleting S3 Multi-Region Access Point (%s): %s", d.Id(), err) + return 
fmt.Errorf("error deleting S3 Multi-Region Access Point (%s): %w", d.Id(), err) } requestTokenARN := aws.StringValue(output.RequestTokenARN) diff --git a/internal/service/s3control/multi_region_access_point_policy.go b/internal/service/s3control/multi_region_access_point_policy.go index d146c4524c28..e2118f46deac 100644 --- a/internal/service/s3control/multi_region_access_point_policy.go +++ b/internal/service/s3control/multi_region_access_point_policy.go @@ -7,10 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -119,27 +119,22 @@ func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta inter return err } - policyOutput, err := conn.GetMultiRegionAccessPointPolicy(&s3control.GetMultiRegionAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + policyDocument, err := FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountId, name) - if tfawserr.ErrCodeEquals(err, ErrCodeNoSuchMultiRegionAccessPoint) { - log.Printf("[WARN] S3 Multi-Region Access Point (%s) not found, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] S3 Multi-Region Access Point Policy (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return fmt.Errorf("error reading S3 Multi-Region Access Point (%s) policy: %s", d.Id(), err) + return fmt.Errorf("error reading S3 Multi-Region Access Point Policy (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] S3 Multi-Region Access Point policy output: %s", policyOutput) - d.Set("account_id", accountId) - d.Set("established", policyOutput.Policy.Established.Policy) - d.Set("proposed", policyOutput.Policy.Proposed.Policy) - d.Set("details", []interface{}{policyDocumentToDetailsMap(aws.String(name), policyOutput.Policy)}) + d.Set("established", policyDocument.Established.Policy) + d.Set("proposed", policyDocument.Proposed.Policy) + d.Set("details", []interface{}{policyDocumentToDetailsMap(aws.String(name), policyDocument)}) return nil } diff --git a/internal/service/s3control/multi_region_access_point_policy_test.go b/internal/service/s3control/multi_region_access_point_policy_test.go index 1b3d086a2065..f4b5f7e94fc5 100644 --- a/internal/service/s3control/multi_region_access_point_policy_test.go +++ b/internal/service/s3control/multi_region_access_point_policy_test.go @@ -164,13 +164,17 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_details_name(t *testing.T) { }) } -func testAccCheckMultiRegionAccessPointPolicyExists(n string, m *s3control.MultiRegionAccessPointPolicyDocument) resource.TestCheckFunc { +func testAccCheckMultiRegionAccessPointPolicyExists(n string, v *s3control.MultiRegionAccessPointPolicyDocument) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } + if rs.Primary.ID == "" { + return fmt.Errorf("No S3 Multi-Region Access Point Policy ID is set") + } + accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) if err != nil { return err @@ -178,18 +182,15 @@ func 
testAccCheckMultiRegionAccessPointPolicyExists(n string, m *s3control.Multi conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - policyDocument, err := tfs3control.FindMultiRegionAccessPointPolicyDocumentByName(conn, accountId, name) + output, err := tfs3control.FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountId, name) if err != nil { return err } - if policyDocument != nil { - *m = *policyDocument - return nil - } + *v = *output - return fmt.Errorf("Multi-Region Access Point Policy not found") + return nil } } diff --git a/internal/service/s3control/multi_region_access_point_test.go b/internal/service/s3control/multi_region_access_point_test.go index 240b513dd2df..f0c7df78c94b 100644 --- a/internal/service/s3control/multi_region_access_point_test.go +++ b/internal/service/s3control/multi_region_access_point_test.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3control" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -16,6 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccS3ControlMultiRegionAccessPoint_basic(t *testing.T) { @@ -193,12 +193,9 @@ func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { return err } - resp, err := conn.GetMultiRegionAccessPoint(&s3control.GetMultiRegionAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + _, err = tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountId, name) - if tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchMultiRegionAccessPoint) { + if tfresource.NotFound(err) { continue } @@ -206,20 +203,23 @@ func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { return err } - if resp != nil && resp.AccessPoint != nil && fmt.Sprintf("%s:%s", accountId, aws.StringValue(resp.AccessPoint.Name)) == rs.Primary.ID { - return fmt.Errorf("S3 Multi-Region Access Point with ID %v still exists", rs.Primary.ID) - } + return fmt.Errorf("S3 Multi-Region Access Point %s still exists", rs.Primary.ID) } + return nil } -func testAccCheckMultiRegionAccessPointExists(n string, m *s3control.MultiRegionAccessPointReport) resource.TestCheckFunc { +func testAccCheckMultiRegionAccessPointExists(n string, v *s3control.MultiRegionAccessPointReport) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } + if rs.Primary.ID == "" { + return fmt.Errorf("No S3 Multi-Region Access Point ID is set") + } + accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) if err != nil { return err @@ -227,18 +227,15 @@ func testAccCheckMultiRegionAccessPointExists(n string, m *s3control.MultiRegion conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - multiRegionAccessPoint, err := tfs3control.FindMultiRegionAccessPointByName(conn, accountId, name) + output, err := tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountId, name) if err != nil { return err } - if 
multiRegionAccessPoint != nil { - *m = *multiRegionAccessPoint - return nil - } + *v = *output - return fmt.Errorf("Multi-Region Access Point not found") + return nil } } From 0acf6245e5f9791dccef3eb90bb6d17edda5f362 Mon Sep 17 00:00:00 2001 From: Zoe Helding Date: Fri, 12 Nov 2021 12:11:53 -0600 Subject: [PATCH 135/304] Amend PR labeler elasticsearch dir elasticsearchservice -> elasticsearch --- .github/labeler-pr-triage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/labeler-pr-triage.yml b/.github/labeler-pr-triage.yml index 4c55b45a9915..2e789cfc1c4c 100644 --- a/.github/labeler-pr-triage.yml +++ b/.github/labeler-pr-triage.yml @@ -289,7 +289,7 @@ service/elasticbeanstalk: - 'internal/service/elasticbeanstalk/**/*' - 'website/**/elastic_beanstalk_*' service/elasticsearch: - - 'internal/service/elasticsearchservice/**/*' + - 'internal/service/elasticsearch/**/*' - 'website/**/elasticsearch_*' service/elb: - 'internal/service/elb/**/*' From aa81f0782fad85f7f83d7881a191a879e1188bad Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 12 Nov 2021 18:31:09 +0000 Subject: [PATCH 136/304] Update CHANGELOG.md for #21756 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99cb713d05eb..d60018092f54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.66.0 (Unreleased) +ENHANCEMENTS: + +* resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702)) + BUG FIXES: * resource/aws_security_group: Fix lack of pagination when describing security groups ([#21743](https://github.com/hashicorp/terraform-provider-aws/issues/21743)) From e02aa042684a5e1c15bf112c21bf7e5c57379533 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 12 Nov 2021 11:25:22 -0800 Subject: [PATCH 137/304] The `CreateDirectoryConfig` operation is synchronous, so we don't need to `Retry` --- .../service/appstream/directory_config.go | 31 ++++++------------- internal/service/appstream/wait.go | 2 -- 2 files changed, 9 insertions(+), 24 deletions(-) diff --git a/internal/service/appstream/directory_config.go b/internal/service/appstream/directory_config.go index 9b02e4248721..45c06ee829ba 100644 --- a/internal/service/appstream/directory_config.go +++ b/internal/service/appstream/directory_config.go @@ -10,12 +10,10 @@ import ( "github.com/aws/aws-sdk-go/service/appstream" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/flex" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func ResourceDirectoryConfig() *schema.Resource { @@ -70,32 +68,21 @@ func ResourceDirectoryConfig() *schema.Resource { func resourceDirectoryConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*conns.AWSClient).AppStreamConn + + directoryName := d.Get("directory_name").(string) input := &appstream.CreateDirectoryConfigInput{ - DirectoryName: aws.String(d.Get("directory_name").(string)), + DirectoryName: aws.String(directoryName), OrganizationalUnitDistinguishedNames: flex.ExpandStringSet(d.Get("organizational_unit_distinguished_names").(*schema.Set)), 
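// ServiceAccountCredentials holds the service account that the fleet or
// image builder uses to connect to the Active Directory domain.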
ServiceAccountCredentials: expandServiceAccountCredentials(d.Get("service_account_credentials").([]interface{})), } - var output *appstream.CreateDirectoryConfigOutput - err := resource.RetryContext(ctx, directoryConfigTimeout, func() *resource.RetryError { - out, err := conn.CreateDirectoryConfigWithContext(ctx, input) - if err != nil { - if tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - output = out - - return nil - }) - - if tfresource.TimedOut(err) { - output, err = conn.CreateDirectoryConfigWithContext(ctx, input) - } + output, err := conn.CreateDirectoryConfigWithContext(ctx, input) if err != nil { - return diag.FromErr(fmt.Errorf("error creating Appstream DirectoryConfig (%s): %w", d.Id(), err)) + return diag.FromErr(fmt.Errorf("error creating Appstream DirectoryConfig (%s): %w", directoryName, err)) + } + + if output == nil || output.DirectoryConfig == nil { + return diag.Errorf("error creating AppStream DirectoryConfig (%s): empty response", directoryName) } d.SetId(aws.StringValue(output.DirectoryConfig.DirectoryName)) diff --git a/internal/service/appstream/wait.go b/internal/service/appstream/wait.go index 18226ca77f3f..b0978861b26f 100644 --- a/internal/service/appstream/wait.go +++ b/internal/service/appstream/wait.go @@ -23,8 +23,6 @@ const ( // imageBuilderStateTimeout Maximum amount of time to wait for the statusImageBuilderState to be RUNNING // or for the ImageBuilder to be deleted imageBuilderStateTimeout = 60 * time.Minute - // directoryConfigTimeout Maximum amount of time to wait for DirectoryConfig operation eventual consistency - directoryConfigTimeout = 4 * time.Minute ) // waitStackStateDeleted waits for a deleted stack From 98cf5e1c617e5a64843cfe5a3fce588985ab89f4 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 12 Nov 2021 11:26:56 -0800 Subject: [PATCH 138/304] These tests don't need the `AmazonAppStreamServiceAccess` role --- internal/service/appstream/directory_config_test.go | 13 +++---------- internal/service/appstream/fleet_test.go | 2 +- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index be695a2a8b5f..654d7c824a43 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -25,10 +25,7 @@ func TestAccAppStreamDirectoryConfig_basic(t *testing.T) { rPasswordUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckHasIAMRole(t, "AmazonAppStreamServiceAccess") - }, + PreCheck: func() { acctest.PreCheck(t) }, ProviderFactories: acctest.ProviderFactories, CheckDestroy: testAccCheckDirectoryConfigDestroy, ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID), @@ -49,11 +46,10 @@ func TestAccAppStreamDirectoryConfig_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDirectoryConfigExists(resourceName, &directoryOutput), resource.TestCheckResourceAttr(resourceName, "directory_name", rName), + acctest.CheckResourceAttrRFC3339(resourceName, "created_time"), resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"), resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_name", rUserNameUpdated), resource.TestCheckResourceAttr(resourceName, 
"service_account_credentials.0.account_password", rPasswordUpdated), - - acctest.CheckResourceAttrRFC3339(resourceName, "created_time"), ), }, { @@ -74,10 +70,7 @@ func TestAccAppStreamDirectoryConfig_disappears(t *testing.T) { rPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckHasIAMRole(t, "AmazonAppStreamServiceAccess") - }, + PreCheck: func() { acctest.PreCheck(t) }, ProviderFactories: acctest.ProviderFactories, CheckDestroy: testAccCheckDirectoryConfigDestroy, ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID), diff --git a/internal/service/appstream/fleet_test.go b/internal/service/appstream/fleet_test.go index 755e90adc9bd..1fd56009139b 100644 --- a/internal/service/appstream/fleet_test.go +++ b/internal/service/appstream/fleet_test.go @@ -333,7 +333,7 @@ resource "aws_subnet" "test" { } resource "aws_appstream_fleet" "test" { - name = %[1]q + name = %[1]q image_arn = "arn:${data.aws_partition.current.partition}:appstream:${data.aws_region.current.name}::image/Amazon-AppStream2-Sample-Image-02-04-2019" compute_capacity { From f7d3965de35c95fd6aff9b9bdeee244df1d6245b Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 12 Nov 2021 15:02:16 -0500 Subject: [PATCH 139/304] run terrafmt on test file --- .../service/s3/bucket_replication_configuration_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 5575cee8b943..39dbc1e16976 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1190,7 +1190,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - prefix = "prefix2" + prefix = "prefix2" } delete_marker_replication { @@ -1251,9 +1251,9 @@ resource "aws_s3_bucket_replication_configuration" "replication" { status = "Enabled" filter { - prefix = "prefix1" + prefix = "prefix1" } - + delete_marker_replication { status = "Enabled" } From 0501e84a9fbd64227e9683f000242a3a027a4fa6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 16:02:31 -0500 Subject: [PATCH 140/304] r/aws_s3control_multi_region_access_point: Add 'findMultiRegionAccessPointOperationByAccountIDAndTokenARN'. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlMultiRegionAccessPoint_\|TestAccS3ControlMultiRegionAccessPointPolicy_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... 
-v -count 1 -parallel 20 -run=TestAccS3ControlMultiRegionAccessPoint_\|TestAccS3ControlMultiRegionAccessPointPolicy_ -timeout 180m === RUN TestAccS3ControlMultiRegionAccessPointPolicy_basic === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_basic === RUN TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint === RUN TestAccS3ControlMultiRegionAccessPointPolicy_details_policy === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_details_policy === RUN TestAccS3ControlMultiRegionAccessPointPolicy_details_name === PAUSE TestAccS3ControlMultiRegionAccessPointPolicy_details_name === RUN TestAccS3ControlMultiRegionAccessPoint_basic === PAUSE TestAccS3ControlMultiRegionAccessPoint_basic === RUN TestAccS3ControlMultiRegionAccessPoint_disappears === PAUSE TestAccS3ControlMultiRegionAccessPoint_disappears === RUN TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock === PAUSE TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock === RUN TestAccS3ControlMultiRegionAccessPoint_name === PAUSE TestAccS3ControlMultiRegionAccessPoint_name === CONT TestAccS3ControlMultiRegionAccessPointPolicy_basic === CONT TestAccS3ControlMultiRegionAccessPoint_disappears === CONT TestAccS3ControlMultiRegionAccessPointPolicy_details_name === CONT TestAccS3ControlMultiRegionAccessPoint_basic === CONT TestAccS3ControlMultiRegionAccessPoint_name === CONT TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock === CONT TestAccS3ControlMultiRegionAccessPointPolicy_details_policy === CONT TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint --- PASS: TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock (325.46s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_basic (331.72s) --- PASS: TestAccS3ControlMultiRegionAccessPoint_basic (335.65s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPoint (339.30s) --- PASS: TestAccS3ControlMultiRegionAccessPoint_disappears (342.69s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_details_policy (421.81s) --- PASS: TestAccS3ControlMultiRegionAccessPoint_name (898.15s) --- PASS: TestAccS3ControlMultiRegionAccessPointPolicy_details_name (900.66s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 904.310s --- internal/service/s3control/consts.go | 7 + internal/service/s3control/errors.go | 1 + internal/service/s3control/find.go | 26 ++ .../s3control/multi_region_access_point.go | 254 ++++++++++++------ .../multi_region_access_point_policy.go | 120 ++++++--- .../multi_region_access_point_policy_test.go | 7 +- .../multi_region_access_point_test.go | 29 +- internal/service/s3control/status.go | 34 +-- internal/service/s3control/wait.go | 13 +- 9 files changed, 309 insertions(+), 182 deletions(-) create mode 100644 internal/service/s3control/consts.go diff --git a/internal/service/s3control/consts.go b/internal/service/s3control/consts.go new file mode 100644 index 000000000000..68a30d284a35 --- /dev/null +++ b/internal/service/s3control/consts.go @@ -0,0 +1,7 @@ +package s3control + +// AsyncOperation.RequestStatus values. 
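+// S3 Multi-Region Access Point changes are asynchronous: create and delete
+// return a request token ARN, and callers poll
+// DescribeMultiRegionAccessPointOperation until RequestStatus reaches a
+// terminal value such as the ones below.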
+const ( + RequestStatusFailed = "FAILED" + RequestStatusSucceeded = "SUCCEEDED" +) diff --git a/internal/service/s3control/errors.go b/internal/service/s3control/errors.go index 4ea41c00def2..adc8d4e5ec4e 100644 --- a/internal/service/s3control/errors.go +++ b/internal/service/s3control/errors.go @@ -6,5 +6,6 @@ package s3control const ( errCodeNoSuchAccessPoint = "NoSuchAccessPoint" errCodeNoSuchAccessPointPolicy = "NoSuchAccessPointPolicy" + errCodeNoSuchAsyncRequest = "NoSuchAsyncRequest" errCodeNoSuchMultiRegionAccessPoint = "NoSuchMultiRegionAccessPoint" ) diff --git a/internal/service/s3control/find.go b/internal/service/s3control/find.go index b7311212e3d8..f9104f11100a 100644 --- a/internal/service/s3control/find.go +++ b/internal/service/s3control/find.go @@ -52,6 +52,32 @@ func FindMultiRegionAccessPointByAccountIDAndName(conn *s3control.S3Control, acc return output.AccessPoint, nil } +func findMultiRegionAccessPointOperationByAccountIDAndTokenARN(conn *s3control.S3Control, accountID string, requestTokenARN string) (*s3control.AsyncOperation, error) { + input := &s3control.DescribeMultiRegionAccessPointOperationInput{ + AccountId: aws.String(accountID), + RequestTokenARN: aws.String(requestTokenARN), + } + + output, err := conn.DescribeMultiRegionAccessPointOperation(input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAsyncRequest) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.AsyncOperation == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.AsyncOperation, nil +} + func FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (*s3control.MultiRegionAccessPointPolicyDocument, error) { input := &s3control.GetMultiRegionAccessPointPolicyInput{ AccountId: aws.String(accountID), diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go index 66e417df0d50..1e81aa4af8a1 100644 --- a/internal/service/s3control/multi_region_access_point.go +++ b/internal/service/s3control/multi_region_access_point.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3control" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -133,59 +132,59 @@ func ResourceMultiRegionAccessPoint() *schema.Resource { } func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface{}) error { - conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + conn, err := s3ControlConn(meta.(*conns.AWSClient)) if err != nil { - return fmt.Errorf("Error getting S3Control Client: %s", err) + return err } - accountId := meta.(*conns.AWSClient).AccountID + accountID := meta.(*conns.AWSClient).AccountID if v, ok := d.GetOk("account_id"); ok { - accountId = v.(string) + accountID = v.(string) } input := &s3control.CreateMultiRegionAccessPointInput{ - AccountId: aws.String(accountId), - Details: expandMultiRegionAccessPointDetails(d.Get("details").([]interface{})[0].(map[string]interface{})), + AccountId: aws.String(accountID), + } + + if v, ok := d.GetOk("details"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.Details = 
expandCreateMultiRegionAccessPointInput_(v.([]interface{})[0].(map[string]interface{})) } - name := aws.StringValue(input.Details.Name) + resourceID := MultiRegionAccessPointCreateResourceID(accountID, aws.StringValue(input.Details.Name)) + log.Printf("[DEBUG] Creating S3 Multi-Region Access Point: %s", input) output, err := conn.CreateMultiRegionAccessPoint(input) if err != nil { - return fmt.Errorf("error creating S3 Control Multi-Region Access Point (%s): %w", name, err) + return fmt.Errorf("error creating S3 Multi-Region Access Point (%s): %w", resourceID, err) } - if output == nil { - return fmt.Errorf("error creating S3 Control Multi-Region Access Point (%s): empty response", name) - } + d.SetId(resourceID) - requestTokenARN := aws.StringValue(output.RequestTokenARN) - _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountID, aws.StringValue(output.RequestTokenARN), d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) to create: %s", d.Id(), err) + return fmt.Errorf("error waiting for Multi-Region Access Point (%s) create: %s", d.Id(), err) } - d.SetId(fmt.Sprintf("%s:%s", accountId, name)) - return resourceMultiRegionAccessPointRead(d, meta) } func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{}) error { - conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + conn, err := s3ControlConn(meta.(*conns.AWSClient)) if err != nil { - return fmt.Errorf("Error getting S3Control Client: %s", err) + return err } - accountId, name, err := MultiRegionAccessPointParseId(d.Id()) + accountID, name, err := MultiRegionAccessPointParseResourceID(d.Id()) + if err != nil { return err } - accessPoint, err := FindMultiRegionAccessPointByAccountIDAndName(conn, accountId, name) + accessPoint, err := FindMultiRegionAccessPointByAccountIDAndName(conn, accountID, name) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Multi-Region Access Point (%s) not found, removing from state", d.Id()) @@ -197,41 +196,46 @@ func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error reading S3 Multi-Region Access Point (%s): %w", d.Id(), err) } - d.Set("account_id", accountId) - d.Set("alias", accessPoint.Alias) - d.Set("domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.accesspoint.s3-global", aws.StringValue(accessPoint.Alias)))) - d.Set("status", accessPoint.Status) - - multiRegionAccessPointARN := arn.ARN{ - AccountID: accountId, + alias := aws.StringValue(accessPoint.Alias) + arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, - Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(accessPoint.Alias)), Service: "s3", + AccountID: accountID, + Resource: fmt.Sprintf("accesspoint/%s", alias), + }.String() + d.Set("account_id", accountID) + d.Set("alias", alias) + d.Set("arn", arn) + if accessPoint != nil { + if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointReport(accessPoint)}); err != nil { + return fmt.Errorf("error setting details: %w", err) + } + } else { + d.Set("details", nil) } - - d.Set("arn", multiRegionAccessPointARN.String()) - - if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointDetails(accessPoint)}); err != nil { - return fmt.Errorf("error setting details: %s", err) - } + // 
https://docs.aws.amazon.com/AmazonS3/latest/userguide//MultiRegionAccessPointRequests.html#MultiRegionAccessPointHostnames. + d.Set("domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.accesspoint.s3-global", alias))) + d.Set("status", accessPoint.Status) return nil } func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface{}) error { - conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + conn, err := s3ControlConn(meta.(*conns.AWSClient)) + if err != nil { - return fmt.Errorf("Error getting S3Control Client: %s", err) + return err } - accountId, name, err := MultiRegionAccessPointParseId(d.Id()) + accountID, name, err := MultiRegionAccessPointParseResourceID(d.Id()) + if err != nil { return err } log.Printf("[DEBUG] Deleting S3 Multi-Region Access Point: %s", d.Id()) output, err := conn.DeleteMultiRegionAccessPoint(&s3control.DeleteMultiRegionAccessPointInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), Details: &s3control.DeleteMultiRegionAccessPointInput_{ Name: aws.String(name), }, @@ -245,28 +249,35 @@ func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface return fmt.Errorf("error deleting S3 Multi-Region Access Point (%s): %w", d.Id(), err) } - requestTokenARN := aws.StringValue(output.RequestTokenARN) - _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutDelete)) + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountID, aws.StringValue(output.RequestTokenARN), d.Timeout(schema.TimeoutDelete)) if err != nil { - return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) to delete: %w", d.Id(), err) + return fmt.Errorf("error waiting for S3 Multi-Region Access Point (%s) delete: %w", d.Id(), err) } return nil } -// MultiRegionAccessPointParseId returns the Account ID and Access Point Name (S3) -func MultiRegionAccessPointParseId(id string) (string, string, error) { - parts := strings.SplitN(id, ":", 2) +const multiRegionAccessPointResourceIDSeparator = ":" - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", fmt.Errorf("unexpected format of ID (%s), expected ACCOUNT_ID:NAME", id) +func MultiRegionAccessPointCreateResourceID(accountID, accessPointName string) string { + parts := []string{accountID, accessPointName} + id := strings.Join(parts, multiRegionAccessPointResourceIDSeparator) + + return id +} + +func MultiRegionAccessPointParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, multiRegionAccessPointResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil } - return parts[0], parts[1], nil + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected account-id%[2]saccess-point-name", id, multiRegionAccessPointResourceIDSeparator) } -func expandMultiRegionAccessPointDetails(tfMap map[string]interface{}) *s3control.CreateMultiRegionAccessPointInput_ { +func expandCreateMultiRegionAccessPointInput_(tfMap map[string]interface{}) *s3control.CreateMultiRegionAccessPointInput_ { if tfMap == nil { return nil } @@ -278,36 +289,83 @@ func expandMultiRegionAccessPointDetails(tfMap map[string]interface{}) *s3contro } if v, ok := tfMap["public_access_block"].([]interface{}); ok && len(v) > 0 { - apiObject.PublicAccessBlock = expandS3AccessPointPublicAccessBlockConfiguration(v) + apiObject.PublicAccessBlock = expandPublicAccessBlockConfiguration(v[0].(map[string]interface{})) + } + + if v, ok := 
tfMap["region"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Regions = expandRegions(v.List()) + } + + return apiObject +} + +func expandPublicAccessBlockConfiguration(tfMap map[string]interface{}) *s3control.PublicAccessBlockConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &s3control.PublicAccessBlockConfiguration{} + + if v, ok := tfMap["block_public_acls"].(bool); ok { + apiObject.BlockPublicAcls = aws.Bool(v) + } + + if v, ok := tfMap["block_public_policy"].(bool); ok { + apiObject.BlockPublicPolicy = aws.Bool(v) + } + + if v, ok := tfMap["ignore_public_acls"].(bool); ok { + apiObject.IgnorePublicAcls = aws.Bool(v) + } + + if v, ok := tfMap["restrict_public_buckets"].(bool); ok { + apiObject.RestrictPublicBuckets = aws.Bool(v) + } + + return apiObject +} + +func expandRegion(tfMap map[string]interface{}) *s3control.Region { + if tfMap == nil { + return nil } - if v, ok := tfMap["region"]; ok { - apiObject.Regions = expandMultiRegionAccessPointRegions(v.(*schema.Set).List()) + apiObject := &s3control.Region{} + + if v, ok := tfMap["bucket"].(string); ok { + apiObject.Bucket = aws.String(v) } return apiObject } -func expandMultiRegionAccessPointRegions(tfList []interface{}) []*s3control.Region { - regions := make([]*s3control.Region, 0, len(tfList)) +func expandRegions(tfList []interface{}) []*s3control.Region { + if len(tfList) == 0 { + return nil + } + + var apiObjects []*s3control.Region for _, tfMapRaw := range tfList { - value, ok := tfMapRaw.(map[string]interface{}) + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { continue } - region := &s3control.Region{ - Bucket: aws.String(value["bucket"].(string)), + apiObject := expandRegion(tfMap) + + if apiObject == nil { + continue } - regions = append(regions, region) + apiObjects = append(apiObjects, apiObject) } - return regions + return apiObjects } -func flattenMultiRegionAccessPointDetails(apiObject *s3control.MultiRegionAccessPointReport) map[string]interface{} { +func flattenMultiRegionAccessPointReport(apiObject *s3control.MultiRegionAccessPointReport) map[string]interface{} { if apiObject == nil { return nil } @@ -319,17 +377,57 @@ func flattenMultiRegionAccessPointDetails(apiObject *s3control.MultiRegionAccess } if v := apiObject.PublicAccessBlock; v != nil { - tfMap["public_access_block"] = flattenS3AccessPointPublicAccessBlockConfiguration(v) + tfMap["public_access_block"] = []interface{}{flattenPublicAccessBlockConfiguration(v)} } if v := apiObject.Regions; v != nil { - tfMap["region"] = flattenMultiRegionAccessPointRegions(v) + tfMap["region"] = flattenRegionReports(v) } return tfMap } -func flattenMultiRegionAccessPointRegions(apiObjects []*s3control.RegionReport) []interface{} { +func flattenPublicAccessBlockConfiguration(apiObject *s3control.PublicAccessBlockConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BlockPublicAcls; v != nil { + tfMap["block_public_acls"] = aws.BoolValue(v) + } + + if v := apiObject.BlockPublicPolicy; v != nil { + tfMap["block_public_policy"] = aws.BoolValue(v) + } + + if v := apiObject.IgnorePublicAcls; v != nil { + tfMap["ignore_public_acls"] = aws.BoolValue(v) + } + + if v := apiObject.RestrictPublicBuckets; v != nil { + tfMap["restrict_public_buckets"] = aws.BoolValue(v) + } + + return tfMap +} + +func flattenRegionReport(apiObject *s3control.RegionReport) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := 
apiObject.Bucket; v != nil { + tfMap["bucket"] = aws.StringValue(v) + } + + return tfMap +} + +func flattenRegionReports(apiObjects []*s3control.RegionReport) []interface{} { if len(apiObjects) == 0 { return nil } @@ -341,34 +439,26 @@ func flattenMultiRegionAccessPointRegions(apiObjects []*s3control.RegionReport) continue } - if apiObject.Bucket == nil { - continue - } - - m := map[string]interface{}{} - if v := apiObject.Bucket; v != nil { - m["bucket"] = aws.StringValue(v) - } - - tfList = append(tfList, m) + tfList = append(tfList, flattenRegionReport(apiObject)) } return tfList } -func getS3ControlConn(awsClient *conns.AWSClient) (*s3control.S3Control, error) { - if awsClient.S3ControlConn.Config.Region != nil && *awsClient.S3ControlConn.Config.Region == endpoints.UsWest2RegionID { - return awsClient.S3ControlConn, nil +func s3ControlConn(client *conns.AWSClient) (*s3control.S3Control, error) { + originalConn := client.S3ControlConn + // All Multi-Region Access Point actions are routed to the US West (Oregon) Region. + region := endpoints.UsWest2RegionID + + if originalConn.Config.Region != nil && aws.StringValue(originalConn.Config.Region) == region { + return originalConn, nil } - sess, err := session.NewSession(&awsClient.S3ControlConn.Config) + sess, err := conns.NewSessionForRegion(&originalConn.Config, region, client.TerraformVersion) if err != nil { - return nil, fmt.Errorf("error creating AWS S3Control session: %w", err) + return nil, fmt.Errorf("error creating AWS session: %w", err) } - // Multi-Region Access Point requires requests to be routed to the us-west-2 endpoint - conn := s3control.New(sess.Copy(&aws.Config{Region: aws.String(endpoints.UsWest2RegionID)})) - - return conn, nil + return s3control.New(sess), nil } diff --git a/internal/service/s3control/multi_region_access_point_policy.go b/internal/service/s3control/multi_region_access_point_policy.go index e2118f46deac..68a2617b1f1f 100644 --- a/internal/service/s3control/multi_region_access_point_policy.go +++ b/internal/service/s3control/multi_region_access_point_policy.go @@ -73,53 +73,59 @@ func ResourceMultiRegionAccessPointPolicy() *schema.Resource { } func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta interface{}) error { - conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + conn, err := s3ControlConn(meta.(*conns.AWSClient)) + if err != nil { - return fmt.Errorf("Error getting S3Control Client: %s", err) + return err } - accountId := meta.(*conns.AWSClient).AccountID + accountID := meta.(*conns.AWSClient).AccountID if v, ok := d.GetOk("account_id"); ok { - accountId = v.(string) + accountID = v.(string) } input := &s3control.PutMultiRegionAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Details: expandMultiRegionAccessPointPolicyDetails(d.Get("details").([]interface{})[0].(map[string]interface{})), + AccountId: aws.String(accountID), + } + + if v, ok := d.GetOk("details"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.Details = expandPutMultiRegionAccessPointPolicyInput_(v.([]interface{})[0].(map[string]interface{})) } - name := aws.StringValue(input.Details.Name) - log.Printf("[DEBUG] Creating S3 Multi-Region Access Point policy: %s", d.Id()) + resourceID := MultiRegionAccessPointCreateResourceID(accountID, aws.StringValue(input.Details.Name)) + + log.Printf("[DEBUG] Creating S3 Multi-Region Access Point Policy: %s", input) output, err := conn.PutMultiRegionAccessPointPolicy(input) if err != nil { - return fmt.Errorf("error creating S3 
Multi-Region Access Point (%s) policy: %s", d.Id(), err) + return fmt.Errorf("error creating S3 Multi-Region Access Point (%s) Policy: %w", resourceID, err) } - requestTokenARN := aws.StringValue(output.RequestTokenARN) - _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutCreate)) + d.SetId(resourceID) + + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountID, aws.StringValue(output.RequestTokenARN), d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) to be created: %s", d.Id(), err) + return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) create: %w", d.Id(), err) } - d.SetId(fmt.Sprintf("%s:%s", accountId, name)) - return resourceMultiRegionAccessPointPolicyRead(d, meta) } func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta interface{}) error { - conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + conn, err := s3ControlConn(meta.(*conns.AWSClient)) + if err != nil { - return fmt.Errorf("Error getting S3Control Client: %s", err) + return err } - accountId, name, err := MultiRegionAccessPointParseId(d.Id()) + accountID, name, err := MultiRegionAccessPointParseResourceID(d.Id()) + if err != nil { return err } - policyDocument, err := FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountId, name) + policyDocument, err := FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountID, name) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Multi-Region Access Point Policy (%s) not found, removing from state", d.Id()) @@ -131,48 +137,65 @@ func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta inter return fmt.Errorf("error reading S3 Multi-Region Access Point Policy (%s): %w", d.Id(), err) } - d.Set("account_id", accountId) - d.Set("established", policyDocument.Established.Policy) - d.Set("proposed", policyDocument.Proposed.Policy) - d.Set("details", []interface{}{policyDocumentToDetailsMap(aws.String(name), policyDocument)}) + d.Set("account_id", accountID) + if policyDocument != nil { + if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointPolicyDocument(name, policyDocument)}); err != nil { + return fmt.Errorf("error setting details: %w", err) + } + } else { + d.Set("details", nil) + } + if v := policyDocument.Established; v != nil { + d.Set("established", v.Policy) + } else { + d.Set("established", nil) + } + if v := policyDocument.Proposed; v != nil { + d.Set("proposed", v.Policy) + } else { + d.Set("proposed", nil) + } return nil } func resourceMultiRegionAccessPointPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - conn, err := getS3ControlConn(meta.(*conns.AWSClient)) + conn, err := s3ControlConn(meta.(*conns.AWSClient)) if err != nil { return fmt.Errorf("Error getting S3Control Client: %s", err) } - accountId, _, err := MultiRegionAccessPointParseId(d.Id()) + accountID, _, err := MultiRegionAccessPointParseResourceID(d.Id()) + if err != nil { return err } - if d.HasChange("details") { - log.Printf("[DEBUG] Updating S3 Multi-Region Access Point policy: %s", d.Id()) - output, err := conn.PutMultiRegionAccessPointPolicy(&s3control.PutMultiRegionAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Details: expandMultiRegionAccessPointPolicyDetails(d.Get("details").([]interface{})[0].(map[string]interface{})), - }) + input := &s3control.PutMultiRegionAccessPointPolicyInput{ + 
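+		// There is no delete API for Multi-Region Access Point policies; an
+		// update proposes a replacement policy document via Put.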
AccountId: aws.String(accountID), + } - if err != nil { - return fmt.Errorf("error updating S3 Multi-Region Access Point (%s) policy: %s", d.Id(), err) - } + if v, ok := d.GetOk("details"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.Details = expandPutMultiRegionAccessPointPolicyInput_(v.([]interface{})[0].(map[string]interface{})) + } - requestTokenARN := *output.RequestTokenARN - _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountId, requestTokenARN, d.Timeout(schema.TimeoutUpdate)) + log.Printf("[DEBUG] Updating S3 Multi-Region Access Point Policy: %s", input) + output, err := conn.PutMultiRegionAccessPointPolicy(input) - if err != nil { - return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) to update: %s", d.Id(), err) - } + if err != nil { + return fmt.Errorf("error updating S3 Multi-Region Access Point (%s) Policy: %w", d.Id(), err) + } + + _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountID, aws.StringValue(output.RequestTokenARN), d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("error waiting for S3 Multi-Region Access Point Policy (%s) update: %w", d.Id(), err) } return resourceMultiRegionAccessPointPolicyRead(d, meta) } -func expandMultiRegionAccessPointPolicyDetails(tfMap map[string]interface{}) *s3control.PutMultiRegionAccessPointPolicyInput_ { +func expandPutMultiRegionAccessPointPolicyInput_(tfMap map[string]interface{}) *s3control.PutMultiRegionAccessPointPolicyInput_ { if tfMap == nil { return nil } @@ -190,11 +213,20 @@ func expandMultiRegionAccessPointPolicyDetails(tfMap map[string]interface{}) *s3 return apiObject } -func policyDocumentToDetailsMap(multiRegionAccessPointName *string, policyDocument *s3control.MultiRegionAccessPointPolicyDocument) map[string]interface{} { - details := map[string]interface{}{} +func flattenMultiRegionAccessPointPolicyDocument(name string, apiObject *s3control.MultiRegionAccessPointPolicyDocument) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + tfMap["name"] = name - details["name"] = aws.StringValue(multiRegionAccessPointName) - details["policy"] = aws.StringValue(policyDocument.Proposed.Policy) + if v := apiObject.Proposed; v != nil { + if v := v.Policy; v != nil { + tfMap["policy"] = aws.StringValue(v) + } + } - return details + return tfMap } diff --git a/internal/service/s3control/multi_region_access_point_policy_test.go b/internal/service/s3control/multi_region_access_point_policy_test.go index f4b5f7e94fc5..5a5214c2dcce 100644 --- a/internal/service/s3control/multi_region_access_point_policy_test.go +++ b/internal/service/s3control/multi_region_access_point_policy_test.go @@ -76,7 +76,7 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPo Config: testAccMultiRegionAccessPointPolicyConfig_basic(bucketName, rName), Check: resource.ComposeTestCheckFunc( testAccCheckMultiRegionAccessPointExists(resourceName, &v), - testAccCheckMultiRegionAccessPointDisappears(acctest.Provider, tfs3control.ResourceMultiRegionAccessPoint(), parentResourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceMultiRegionAccessPoint(), parentResourceName), ), ExpectNonEmptyPlan: true, }, @@ -175,14 +175,15 @@ func testAccCheckMultiRegionAccessPointPolicyExists(n string, v *s3control.Multi return fmt.Errorf("No S3 Multi-Region Access Point Policy ID is set") } - accountId, name, err := 
tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) + accountID, name, err := tfs3control.MultiRegionAccessPointParseResourceID(rs.Primary.ID) + if err != nil { return err } conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - output, err := tfs3control.FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountId, name) + output, err := tfs3control.FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountID, name) if err != nil { return err diff --git a/internal/service/s3control/multi_region_access_point_test.go b/internal/service/s3control/multi_region_access_point_test.go index f0c7df78c94b..49ee124622c8 100644 --- a/internal/service/s3control/multi_region_access_point_test.go +++ b/internal/service/s3control/multi_region_access_point_test.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/service/s3control" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -81,7 +80,7 @@ func TestAccS3ControlMultiRegionAccessPoint_disappears(t *testing.T) { Config: testAccMultiRegionAccessPointConfig_basic(bucketName, rName), Check: resource.ComposeTestCheckFunc( testAccCheckMultiRegionAccessPointExists(resourceName, &v), - testAccCheckMultiRegionAccessPointDisappears(acctest.Provider, tfs3control.ResourceMultiRegionAccessPoint(), resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceMultiRegionAccessPoint(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -164,22 +163,6 @@ func TestAccS3ControlMultiRegionAccessPoint_name(t *testing.T) { }) } -func testAccCheckMultiRegionAccessPointDisappears(provider *schema.Provider, resource *schema.Resource, resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resourceState, ok := s.RootModule().Resources[resourceName] - - if !ok { - return fmt.Errorf("resource not found: %s", resourceName) - } - - if resourceState.Primary.ID == "" { - return fmt.Errorf("No S3 Multi-Region Access Point ID is set") - } - - return acctest.DeleteResource(resource, resource.Data(resourceState.Primary), provider.Meta()) - } -} - func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn @@ -188,12 +171,13 @@ func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { continue } - accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) + accountID, name, err := tfs3control.MultiRegionAccessPointParseResourceID(rs.Primary.ID) + if err != nil { return err } - _, err = tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountId, name) + _, err = tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountID, name) if tfresource.NotFound(err) { continue @@ -220,14 +204,15 @@ func testAccCheckMultiRegionAccessPointExists(n string, v *s3control.MultiRegion return fmt.Errorf("No S3 Multi-Region Access Point ID is set") } - accountId, name, err := tfs3control.MultiRegionAccessPointParseId(rs.Primary.ID) + accountID, name, err := tfs3control.MultiRegionAccessPointParseResourceID(rs.Primary.ID) + if err != nil { return err } conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - output, err := 
tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountId, name) + output, err := tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountID, name) if err != nil { return err diff --git a/internal/service/s3control/status.go b/internal/service/s3control/status.go index d8f417a0f099..659ec34b6852 100644 --- a/internal/service/s3control/status.go +++ b/internal/service/s3control/status.go @@ -1,21 +1,12 @@ package s3control import ( - "fmt" - "log" "strconv" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -const ( - // RequestStatus SUCCEEDED - RequestStatusSucceeded = "SUCCEEDED" - - // RequestStatus FAILED - RequestStatusFailed = "FAILED" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // statusPublicAccessBlockConfigurationBlockPublicACLs fetches the PublicAccessBlockConfiguration and its BlockPublicAcls @@ -86,29 +77,18 @@ func statusPublicAccessBlockConfigurationRestrictPublicBuckets(conn *s3control.S } } -func statusMultiRegionAccessPointRequest(conn *s3control.S3Control, accountId string, requestTokenArn string) resource.StateRefreshFunc { +func statusMultiRegionAccessPointRequest(conn *s3control.S3Control, accountID string, requestTokenARN string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - input := &s3control.DescribeMultiRegionAccessPointOperationInput{ - AccountId: aws.String(accountId), - RequestTokenARN: aws.String(requestTokenArn), - } - - log.Printf("[DEBUG] Describing S3 Multi-Region Access Point Operation (%s): %s", requestTokenArn, input) + output, err := findMultiRegionAccessPointOperationByAccountIDAndTokenARN(conn, accountID, requestTokenARN) - output, err := conn.DescribeMultiRegionAccessPointOperation(input) + if tfresource.NotFound(err) { + return nil, "", nil + } if err != nil { - log.Printf("error Describing S3 Multi-Region Access Point Operation (%s): %s", requestTokenArn, err) return nil, "", err } - asyncOperation := output.AsyncOperation - - if aws.StringValue(asyncOperation.RequestStatus) == RequestStatusFailed { - errorDetails := asyncOperation.ResponseDetails.ErrorDetails - return nil, RequestStatusFailed, fmt.Errorf("S3 Multi-Region Access Point asynchronous operation failed (%s): %s: %s", requestTokenArn, aws.StringValue(errorDetails.Code), aws.StringValue(errorDetails.Message)) - } - - return asyncOperation, aws.StringValue(asyncOperation.RequestStatus), nil + return output, aws.StringValue(output.RequestStatus), nil } } diff --git a/internal/service/s3control/wait.go b/internal/service/s3control/wait.go index da23615c5787..296b3a8d7404 100644 --- a/internal/service/s3control/wait.go +++ b/internal/service/s3control/wait.go @@ -1,12 +1,14 @@ package s3control import ( - "log" + "fmt" "strconv" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) const ( @@ -96,19 +98,22 @@ func waitPublicAccessBlockConfigurationRestrictPublicBucketsUpdated(conn *s3cont return nil, err } -func waitMultiRegionAccessPointRequestSucceeded(conn *s3control.S3Control, accountId string, requestTokenArn string, timeout time.Duration) (*s3control.AsyncOperation, error) { //nolint:unparam +func waitMultiRegionAccessPointRequestSucceeded(conn *s3control.S3Control, accountID string, requestTokenArn string, timeout time.Duration) 
(*s3control.AsyncOperation, error) { //nolint:unparam stateConf := &resource.StateChangeConf{ Target: []string{RequestStatusSucceeded}, Timeout: timeout, - Refresh: statusMultiRegionAccessPointRequest(conn, accountId, requestTokenArn), + Refresh: statusMultiRegionAccessPointRequest(conn, accountID, requestTokenArn), MinTimeout: multiRegionAccessPointRequestSucceededMinTimeout, Delay: multiRegionAccessPointRequestSucceededDelay, // Wait 15 secs before starting } - log.Printf("[DEBUG] Waiting for S3 Multi-Region Access Point request (%s) to succeed", requestTokenArn) outputRaw, err := stateConf.WaitForState() if output, ok := outputRaw.(*s3control.AsyncOperation); ok { + if status, responseDetails := aws.StringValue(output.RequestStatus), output.ResponseDetails; status == RequestStatusFailed && responseDetails != nil && responseDetails.ErrorDetails != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(responseDetails.ErrorDetails.Code), aws.StringValue(responseDetails.ErrorDetails.Message))) + } + return output, err } From c73e0dc0919b35e12b3fb63d4be3aa766498bbe6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 16:24:24 -0500 Subject: [PATCH 141/304] r/aws_s3control_multi_region_access_point: Fixup sweeper. Acceptance test output: % make sweep SWEEPARGS=-sweep-run=aws_s3control_multi_region_access_point SWEEP=us-west-2,us-west-1,us-east-2,us-east-1 WARNING: This will destroy infrastructure. Use only in development accounts. go test ./internal/sweep -v -tags=sweep -sweep=us-west-2,us-west-1,us-east-2,us-east-1 -sweep-run=aws_s3control_multi_region_access_point -timeout 60m 2021/11/12 16:15:48 [DEBUG] Running Sweepers for region (us-west-2): 2021/11/12 16:15:48 [DEBUG] Running Sweeper (aws_s3control_multi_region_access_point) in region (us-west-2) 2021/11/12 16:15:48 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:15:48 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:15:48 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-9076530765028862245 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-4507661412741926133 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-8245535467367663015 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-7991657015236693467 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-8736564941094484309 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-4838572432635178774 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-5037569265071601673 2021/11/12 16:15:49 [DEBUG] Waiting for state to become: [success] 2021/11/12 16:15:49 [DEBUG] Deleting S3 Multi-Region Access Point: 187416307283:tf-acc-test-9089626914245414205 2021/11/12 16:15:50 [DEBUG] Waiting for state to become: [SUCCEEDED] 
[seven further identical "[DEBUG] Waiting for state to become: [SUCCEEDED]" lines elided] 2021/11/12 16:16:05 [TRACE] Waiting 5s before next try [seven further identical 5s TRACE lines, then several hundred identical "[TRACE] Waiting 10s before next try" lines for the 8 in-flight delete operations, 16:16:10 through 16:22:06, elided] 2021/11/12 16:22:16 [DEBUG] Completed Sweeper (aws_s3control_multi_region_access_point) in region (us-west-2) in 6m28.307035036s 2021/11/12 16:22:16 Completed Sweepers for region (us-west-2) in 6m28.307241367s 2021/11/12 16:22:16 Sweeper Tests for region (us-west-2) ran successfully: - aws_s3control_multi_region_access_point 2021/11/12 16:22:16 [DEBUG] Running Sweepers for region (us-west-1): 2021/11/12 16:22:16 [DEBUG] Running Sweeper (aws_s3control_multi_region_access_point) in region (us-west-1) 2021/11/12 16:22:16 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:22:16 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:22:16 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:22:17 [WARN] Skipping S3 Multi-Region Access Point sweep for region: us-west-1 2021/11/12 16:22:17 [DEBUG] Completed Sweeper (aws_s3control_multi_region_access_point) in region (us-west-1) in 686.967832ms 2021/11/12 16:22:17 Completed Sweepers for region (us-west-1) in 686.988675ms 2021/11/12 16:22:17 Sweeper Tests for region (us-west-1) ran successfully: - aws_s3control_multi_region_access_point 2021/11/12 16:22:17 [DEBUG] Running Sweepers for region (us-east-2): 2021/11/12 16:22:17 [DEBUG] Running Sweeper (aws_s3control_multi_region_access_point) in region (us-east-2) 2021/11/12 16:22:17 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:22:17 [DEBUG] Trying to get account information via sts:GetCallerIdentity
2021/11/12 16:22:17 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:22:17 [WARN] Skipping S3 Multi-Region Access Point sweep for region: us-east-2 2021/11/12 16:22:17 [DEBUG] Completed Sweeper (aws_s3control_multi_region_access_point) in region (us-east-2) in 437.813655ms 2021/11/12 16:22:17 Completed Sweepers for region (us-east-2) in 437.828116ms 2021/11/12 16:22:17 Sweeper Tests for region (us-east-2) ran successfully: - aws_s3control_multi_region_access_point 2021/11/12 16:22:17 [DEBUG] Running Sweepers for region (us-east-1): 2021/11/12 16:22:17 [DEBUG] Running Sweeper (aws_s3control_multi_region_access_point) in region (us-east-1) 2021/11/12 16:22:17 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:22:17 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:22:18 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:22:19 [WARN] Skipping S3 Multi-Region Access Point sweep for region: us-east-1 2021/11/12 16:22:19 [DEBUG] Completed Sweeper (aws_s3control_multi_region_access_point) in region (us-east-1) in 1.130747602s 2021/11/12 16:22:19 Completed Sweepers for region (us-east-1) in 1.130766096s 2021/11/12 16:22:19 Sweeper Tests for region (us-east-1) ran successfully: - aws_s3control_multi_region_access_point ok github.com/hashicorp/terraform-provider-aws/internal/sweep 395.612s --- internal/service/s3control/sweep.go | 59 ++++++++++++----------------- 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index db00b2bc8d28..6a707211b593 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -8,6 +8,7 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3control" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/go-multierror" @@ -22,8 +23,8 @@ func init() { F: sweepAccessPoints, }) - resource.AddTestSweepers("aws_s3_multi_region_access_point", &resource.Sweeper{ - Name: "aws_s3_multi_region_access_point", + resource.AddTestSweepers("aws_s3control_multi_region_access_point", &resource.Sweeper{ + Name: "aws_s3control_multi_region_access_point", F: sweepMultiRegionAccessPoints, }) } @@ -85,65 +86,53 @@ func sweepAccessPoints(region string) error { } func sweepMultiRegionAccessPoints(region string) error { - client, err := sharedClientForRegion(region) + client, err := sweep.SharedRegionalSweepClient(region) if err != nil { return fmt.Errorf("error getting client: %s", err) } - if client.(*AWSClient).region != endpoints.UsWest2RegionID { - log.Printf("[WARN] Skipping sweep for region: %s", client.(*AWSClient).region) + if region != endpoints.UsWest2RegionID { + log.Printf("[WARN] Skipping S3 Multi-Region Access Point sweep for region: %s", region) return nil } - accountId := client.(*AWSClient).accountid - conn := client.(*AWSClient).s3controlconn - + conn := client.(*conns.AWSClient).S3ControlConn + accountID := client.(*conns.AWSClient).AccountID input := &s3control.ListMultiRegionAccessPointsInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), } - var sweeperErrs *multierror.Error + sweepResources := make([]*sweep.SweepResource, 0) err = conn.ListMultiRegionAccessPointsPages(input, func(page *s3control.ListMultiRegionAccessPointsOutput, lastPage bool) bool { if page == nil { return !lastPage } - for _, multiRegionAccessPoint := range 
page.AccessPoints { - input := &s3control.DeleteMultiRegionAccessPointInput{ - AccountId: aws.String(accountId), - Details: &s3control.DeleteMultiRegionAccessPointInput_{ - Name: multiRegionAccessPoint.Name, - }, - } - - name := aws.StringValue(multiRegionAccessPoint.Name) - - log.Printf("[INFO] Deleting S3 Multi-Region Access Point: %s", name) - _, err := conn.DeleteMultiRegionAccessPoint(input) - - if tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchMultiRegionAccessPoint) { - continue - } + for _, accessPoint := range page.AccessPoints { + r := ResourceMultiRegionAccessPoint() + d := r.Data(nil) + d.SetId(MultiRegionAccessPointCreateResourceID(accountID, aws.StringValue(accessPoint.Name))) - if err != nil { - sweeperErr := fmt.Errorf("error deleting S3 Multi-Region Access Point (%s): %w", name, err) - log.Printf("[ERROR] %s", sweeperErr) - sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) - continue - } + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } return !lastPage }) - if testSweepSkipSweepError(err) { + if sweep.SkipSweepError(err) { log.Printf("[WARN] Skipping S3 Multi-Region Access Point sweep for %s: %s", region, err) return nil } if err != nil { - return fmt.Errorf("error listing S3 Multi-Region Access Points: %w", err) + return fmt.Errorf("error listing S3 Multi-Region Access Points (%s): %w", region, err) } - return sweeperErrs.ErrorOrNil() + err = sweep.SweepOrchestrator(sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping S3 Multi-Region Access Points (%s): %w", region, err) + } + + return nil } From b2fafcfd1d72b976423cbd39c355fc85b32f2fc2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 16:50:31 -0500 Subject: [PATCH 142/304] r/aws_s3_access_point: Add 'AccessPointCreateResourceID' and rename 'AccessPointParseID' to 'AccessPointParseResourceID'. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... 
-v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_basic -timeout 180m === RUN TestAccS3ControlAccessPoint_basic === PAUSE TestAccS3ControlAccessPoint_basic === CONT TestAccS3ControlAccessPoint_basic --- PASS: TestAccS3ControlAccessPoint_basic (31.37s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 35.463s --- internal/service/s3control/access_point.go | 34 ++++++++++++------- .../service/s3control/access_point_test.go | 8 ++--- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go index 3d9581ff3883..0a3a39cf81af 100644 --- a/internal/service/s3control/access_point.go +++ b/internal/service/s3control/access_point.go @@ -178,7 +178,7 @@ func resourceAccessPointCreate(d *schema.ResourceData, meta interface{}) error { func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).S3ControlConn - accountId, name, err := AccessPointParseID(d.Id()) + accountId, name, err := AccessPointParseResourceID(d.Id()) if err != nil { return err } @@ -291,7 +291,7 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { func resourceAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).S3ControlConn - accountId, name, err := AccessPointParseID(d.Id()) + accountId, name, err := AccessPointParseResourceID(d.Id()) if err != nil { return err } @@ -327,7 +327,7 @@ func resourceAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { func resourceAccessPointDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).S3ControlConn - accountId, name, err := AccessPointParseID(d.Id()) + accountId, name, err := AccessPointParseResourceID(d.Id()) if err != nil { return err } @@ -349,21 +349,31 @@ func resourceAccessPointDelete(d *schema.ResourceData, meta interface{}) error { return nil } -// AccessPointParseID returns the Account ID and Access Point Name (S3) or ARN (S3 on Outposts) -func AccessPointParseID(id string) (string, string, error) { - parsedARN, err := arn.Parse(id) +const accessPointResourceIDSeparator = ":" - if err == nil { - return parsedARN.AccountID, id, nil +func AccessPointCreateResourceID(accessPointARN, accountID, accessPointName string) string { + // S3 on Outposts access points use the full ARN as the resource ID. + if v, err := arn.Parse(accessPointARN); err == nil && v.Service == "s3-outposts" { + return accessPointARN } - parts := strings.SplitN(id, ":", 2) + parts := []string{accountID, accessPointName} + id := strings.Join(parts, accessPointResourceIDSeparator) - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", fmt.Errorf("unexpected format of ID (%s), expected ACCOUNT_ID:NAME", id) + return id +} + +func AccessPointParseResourceID(id string) (string, string, error) { + if v, err := arn.Parse(id); err == nil { + return v.AccountID, id, nil + } + + parts := strings.Split(id, accessPointResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil } - return parts[0], parts[1], nil + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected account-id%[2]saccess-point-name", id, accessPointResourceIDSeparator) } func expandS3AccessPointVpcConfiguration(vConfig []interface{}) *s3control.VpcConfiguration { diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go index 5333c0eb335f..e2ad0348654e 100644 ---
a/internal/service/s3control/access_point_test.go +++ b/internal/service/s3control/access_point_test.go @@ -333,7 +333,7 @@ func testAccCheckAccessPointDisappears(n string) resource.TestCheckFunc { return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := tfs3control.AccessPointParseID(rs.Primary.ID) + accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } @@ -360,7 +360,7 @@ func testAccCheckAccessPointDestroy(s *terraform.State) error { continue } - accountId, name, err := tfs3control.AccessPointParseID(rs.Primary.ID) + accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } @@ -387,7 +387,7 @@ func testAccCheckAccessPointExists(n string, output *s3control.GetAccessPointOut return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := tfs3control.AccessPointParseID(rs.Primary.ID) + accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } @@ -419,7 +419,7 @@ func testAccCheckAccessPointHasPolicy(n string, fn func() string) resource.TestC return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := tfs3control.AccessPointParseID(rs.Primary.ID) + accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } From a035bd28c76151cd750d10bf8e79bc5434fedf42 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 16:52:40 -0500 Subject: [PATCH 143/304] r/aws_s3_access_point: Modernize sweeper. Acceptance test output: % make sweep SWEEPARGS=-sweep-run=aws_s3_access_point SWEEP=us-west-2,us-west-1,us-east-2,us-east-1 WARNING: This will destroy infrastructure. Use only in development accounts. go test ./internal/sweep -v -tags=sweep -sweep=us-west-2,us-west-1,us-east-2,us-east-1 -sweep-run=aws_s3_access_point -timeout 60m 2021/11/12 16:51:48 [DEBUG] Running Sweepers for region (us-west-2): 2021/11/12 16:51:48 [DEBUG] Running Sweeper (aws_s3_access_point) in region (us-west-2) 2021/11/12 16:51:48 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:51:48 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:49 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:50 [DEBUG] Completed Sweeper (aws_s3_access_point) in region (us-west-2) in 1.494325512s 2021/11/12 16:51:50 Completed Sweepers for region (us-west-2) in 1.494628509s 2021/11/12 16:51:50 Sweeper Tests for region (us-west-2) ran successfully: - aws_s3_access_point 2021/11/12 16:51:50 [DEBUG] Running Sweepers for region (us-west-1): 2021/11/12 16:51:50 [DEBUG] Running Sweeper (aws_s3_access_point) in region (us-west-1) 2021/11/12 16:51:50 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:51:50 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:50 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:51 [DEBUG] Completed Sweeper (aws_s3_access_point) in region (us-west-1) in 1.074664889s 2021/11/12 16:51:51 Completed Sweepers for region (us-west-1) in 1.074734541s 2021/11/12 16:51:51 Sweeper Tests for region (us-west-1) ran successfully: - aws_s3_access_point 2021/11/12 16:51:51 [DEBUG] Running Sweepers for region (us-east-2): 2021/11/12 16:51:51 [DEBUG] Running Sweeper (aws_s3_access_point) in region (us-east-2) 2021/11/12 16:51:51 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:51:51 [DEBUG] Trying to get account information via 
sts:GetCallerIdentity 2021/11/12 16:51:51 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:53 [DEBUG] Completed Sweeper (aws_s3_access_point) in region (us-east-2) in 1.659487442s 2021/11/12 16:51:53 Completed Sweepers for region (us-east-2) in 1.659585639s 2021/11/12 16:51:53 Sweeper Tests for region (us-east-2) ran successfully: - aws_s3_access_point 2021/11/12 16:51:53 [DEBUG] Running Sweepers for region (us-east-1): 2021/11/12 16:51:53 [DEBUG] Running Sweeper (aws_s3_access_point) in region (us-east-1) 2021/11/12 16:51:53 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/12 16:51:53 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:53 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/12 16:51:53 [DEBUG] Completed Sweeper (aws_s3_access_point) in region (us-east-1) in 660.110726ms 2021/11/12 16:51:53 Completed Sweepers for region (us-east-1) in 660.189434ms 2021/11/12 16:51:53 Sweeper Tests for region (us-east-1) ran successfully: - aws_s3_access_point ok github.com/hashicorp/terraform-provider-aws/internal/sweep 8.223s --- internal/service/s3control/sweep.go | 46 ++++++++++------------------- 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index 6a707211b593..c8f712cd7045 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -10,8 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3control" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/sweep" @@ -34,14 +32,12 @@ func sweepAccessPoints(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - - accountId := client.(*conns.AWSClient).AccountID conn := client.(*conns.AWSClient).S3ControlConn - + accountID := client.(*conns.AWSClient).AccountID input := &s3control.ListAccessPointsInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), } - var sweeperErrs *multierror.Error + sweepResources := make([]*sweep.SweepResource, 0) err = conn.ListAccessPointsPages(input, func(page *s3control.ListAccessPointsOutput, lastPage bool) bool { if page == nil { @@ -49,25 +45,11 @@ func sweepAccessPoints(region string) error { } for _, accessPoint := range page.AccessPointList { - input := &s3control.DeleteAccessPointInput{ - AccountId: aws.String(accountId), - Name: accessPoint.Name, - } - name := aws.StringValue(accessPoint.Name) - - log.Printf("[INFO] Deleting S3 Access Point: %s", name) - _, err := conn.DeleteAccessPoint(input) - - if tfawserr.ErrMessageContains(err, "NoSuchAccessPoint", "") { - continue - } - - if err != nil { - sweeperErr := fmt.Errorf("error deleting S3 Access Point (%s): %w", name, err) - log.Printf("[ERROR] %s", sweeperErr) - sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) - continue - } + r := ResourceAccessPoint() + d := r.Data(nil) + d.SetId(AccessPointCreateResourceID(aws.StringValue(accessPoint.AccessPointArn), accountID, aws.StringValue(accessPoint.Name))) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } return !lastPage @@ -79,10 +61,16 @@ func sweepAccessPoints(region string) error { } if err != nil { - 
return fmt.Errorf("error listing S3 Access Points: %w", err) + return fmt.Errorf("error listing SS3 Access Points (%s): %w", region, err) } - return sweeperErrs.ErrorOrNil() + err = sweep.SweepOrchestrator(sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping S3 Access Points (%s): %w", region, err) + } + + return nil } func sweepMultiRegionAccessPoints(region string) error { @@ -90,12 +78,10 @@ func sweepMultiRegionAccessPoints(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - if region != endpoints.UsWest2RegionID { log.Printf("[WARN] Skipping S3 Multi-Region Access Point sweep for region: %s", region) return nil } - conn := client.(*conns.AWSClient).S3ControlConn accountID := client.(*conns.AWSClient).AccountID input := &s3control.ListMultiRegionAccessPointsInput{ From f71befe9567266aa036681102fb3c345f4522fdb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 17:34:59 -0500 Subject: [PATCH 144/304] r/aws_s3control_multi_region_access_point: Add 'TestAccS3ControlMultiRegionAccessPoint_threeRegions' and skip tests in GovCloud. --- .../s3control/multi_region_access_point.go | 42 ++--- .../multi_region_access_point_policy.go | 11 +- .../multi_region_access_point_policy_test.go | 43 +++--- .../multi_region_access_point_test.go | 146 +++++++++++++++--- 4 files changed, 178 insertions(+), 64 deletions(-) diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go index 1e81aa4af8a1..bfaa90080cad 100644 --- a/internal/service/s3control/multi_region_access_point.go +++ b/internal/service/s3control/multi_region_access_point.go @@ -132,7 +132,7 @@ func ResourceMultiRegionAccessPoint() *schema.Resource { } func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface{}) error { - conn, err := s3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConn(meta.(*conns.AWSClient)) if err != nil { return err @@ -172,7 +172,7 @@ func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface } func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{}) error { - conn, err := s3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConn(meta.(*conns.AWSClient)) if err != nil { return err @@ -221,7 +221,7 @@ func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{} } func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface{}) error { - conn, err := s3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConn(meta.(*conns.AWSClient)) if err != nil { return err @@ -258,6 +258,24 @@ func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface return nil } +func S3ControlConn(client *conns.AWSClient) (*s3control.S3Control, error) { + originalConn := client.S3ControlConn + // All Multi-Region Access Point actions are routed to the US West (Oregon) Region. 
+ region := endpoints.UsWest2RegionID + + if originalConn.Config.Region != nil && aws.StringValue(originalConn.Config.Region) == region { + return originalConn, nil + } + + sess, err := conns.NewSessionForRegion(&originalConn.Config, region, client.TerraformVersion) + + if err != nil { + return nil, fmt.Errorf("error creating AWS session: %w", err) + } + + return s3control.New(sess), nil +} + const multiRegionAccessPointResourceIDSeparator = ":" func MultiRegionAccessPointCreateResourceID(accountID, accessPointName string) string { @@ -444,21 +462,3 @@ func flattenRegionReports(apiObjects []*s3control.RegionReport) []interface{} { return tfList } - -func s3ControlConn(client *conns.AWSClient) (*s3control.S3Control, error) { - originalConn := client.S3ControlConn - // All Multi-Region Access Point actions are routed to the US West (Oregon) Region. - region := endpoints.UsWest2RegionID - - if originalConn.Config.Region != nil && aws.StringValue(originalConn.Config.Region) == region { - return originalConn, nil - } - - sess, err := conns.NewSessionForRegion(&originalConn.Config, region, client.TerraformVersion) - - if err != nil { - return nil, fmt.Errorf("error creating AWS session: %w", err) - } - - return s3control.New(sess), nil -} diff --git a/internal/service/s3control/multi_region_access_point_policy.go b/internal/service/s3control/multi_region_access_point_policy.go index 68a2617b1f1f..704a87547005 100644 --- a/internal/service/s3control/multi_region_access_point_policy.go +++ b/internal/service/s3control/multi_region_access_point_policy.go @@ -73,7 +73,7 @@ func ResourceMultiRegionAccessPointPolicy() *schema.Resource { } func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta interface{}) error { - conn, err := s3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConn(meta.(*conns.AWSClient)) if err != nil { return err @@ -113,7 +113,7 @@ func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta int } func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta interface{}) error { - conn, err := s3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConn(meta.(*conns.AWSClient)) if err != nil { return err @@ -160,9 +160,10 @@ func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta inter } func resourceMultiRegionAccessPointPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - conn, err := s3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConn(meta.(*conns.AWSClient)) + if err != nil { - return fmt.Errorf("Error getting S3Control Client: %s", err) + return err } accountID, _, err := MultiRegionAccessPointParseResourceID(d.Id()) @@ -183,7 +184,7 @@ func resourceMultiRegionAccessPointPolicyUpdate(d *schema.ResourceData, meta int output, err := conn.PutMultiRegionAccessPointPolicy(input) if err != nil { - return fmt.Errorf("error updating S3 Multi-Region Access Point (%s) Policy: %w", d.Id(), err) + return fmt.Errorf("error updating S3 Multi-Region Access Point Policy (%s): %w", d.Id(), err) } _, err = waitMultiRegionAccessPointRequestSucceeded(conn, accountID, aws.StringValue(output.RequestTokenARN), d.Timeout(schema.TimeoutUpdate)) diff --git a/internal/service/s3control/multi_region_access_point_policy_test.go b/internal/service/s3control/multi_region_access_point_policy_test.go index 5a5214c2dcce..4b90b3d29f19 100644 --- a/internal/service/s3control/multi_region_access_point_policy_test.go +++ b/internal/service/s3control/multi_region_access_point_policy_test.go @@ -5,7 
+5,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3control" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -21,11 +20,12 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_basic(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) multiRegionAccessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, // Multi-Region Access Point Policy cannot be deleted once applied. @@ -61,11 +61,12 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_disappears_MultiRegionAccessPo bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, // Multi-Region Access Point Policy cannot be deleted once applied. @@ -90,11 +91,12 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_details_policy(t *testing.T) { multiRegionAccessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, // Multi-Region Access Point Policy cannot be deleted once applied. @@ -130,11 +132,12 @@ func TestAccS3ControlMultiRegionAccessPointPolicy_details_name(t *testing.T) { multiRegionAccessPointName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, // Multi-Region Access Point Policy cannot be deleted once applied. 
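// (S3 Control offers no API to delete a Multi-Region Access Point policy, which is why these test cases omit CheckDestroy.)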
@@ -181,7 +184,11 @@ func testAccCheckMultiRegionAccessPointPolicyExists(n string, v *s3control.Multi return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + conn, err := tfs3control.S3ControlConn(acctest.Provider.Meta().(*conns.AWSClient)) + + if err != nil { + return err + } output, err := tfs3control.FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn, accountID, name) diff --git a/internal/service/s3control/multi_region_access_point_test.go b/internal/service/s3control/multi_region_access_point_test.go index 49ee124622c8..a0aa5b83a4d4 100644 --- a/internal/service/s3control/multi_region_access_point_test.go +++ b/internal/service/s3control/multi_region_access_point_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3control" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -23,11 +23,12 @@ func TestAccS3ControlMultiRegionAccessPoint_basic(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, @@ -48,7 +49,9 @@ func TestAccS3ControlMultiRegionAccessPoint_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.ignore_public_acls", "true"), resource.TestCheckResourceAttr(resourceName, "details.0.public_access_block.0.restrict_public_buckets", "true"), resource.TestCheckResourceAttr(resourceName, "details.0.region.#", "1"), - resource.TestCheckResourceAttr(resourceName, "details.0.region.0.bucket", bucketName), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "details.0.region.*", map[string]string{ + "bucket": bucketName, + }), resource.TestCheckResourceAttr(resourceName, "status", s3control.MultiRegionAccessPointStatusReady), ), }, @@ -67,11 +70,12 @@ func TestAccS3ControlMultiRegionAccessPoint_disappears(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, @@ -94,11 +98,12 @@ func TestAccS3ControlMultiRegionAccessPoint_PublicAccessBlock(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, @@ -130,11 +135,12 @@ func TestAccS3ControlMultiRegionAccessPoint_name(t *testing.T) { rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, @@ -163,8 +169,57 @@ func TestAccS3ControlMultiRegionAccessPoint_name(t *testing.T) { }) } +func TestAccS3ControlMultiRegionAccessPoint_threeRegions(t *testing.T) { + var providers []*schema.Provider + var v s3control.MultiRegionAccessPointReport + resourceName := "aws_s3control_multi_region_access_point.test" + bucket1Name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucket2Name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucket3Name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + if acctest.Partition() == "aws-us-gov" { + t.Skip("S3 Multi-Region Access Point is not supported in GovCloud partition") + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckMultipleRegion(t, 3) }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + ProviderFactories: acctest.FactoriesMultipleRegion(&providers, 3), + CheckDestroy: testAccCheckMultiRegionAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiRegionAccessPointConfig_threeRegions(bucket1Name, bucket2Name, bucket3Name, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMultiRegionAccessPointExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "details.0.region.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "details.0.region.*", map[string]string{ + "bucket": bucket1Name, + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "details.0.region.*", map[string]string{ + "bucket": bucket2Name, + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "details.0.region.*", map[string]string{ + "bucket": bucket3Name, + }), + resource.TestCheckResourceAttr(resourceName, "status", s3control.MultiRegionAccessPointStatusReady), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + conn, err := tfs3control.S3ControlConn(acctest.Provider.Meta().(*conns.AWSClient)) + + if err != nil { + return err + } for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3control_multi_region_access_point" { @@ -210,7 +265,11 @@ func testAccCheckMultiRegionAccessPointExists(n string, v 
*s3control.MultiRegion return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + conn, err := tfs3control.S3ControlConn(acctest.Provider.Meta().(*conns.AWSClient)) + + if err != nil { + return err + } output, err := tfs3control.FindMultiRegionAccessPointByAccountIDAndName(conn, accountID, name) @@ -281,3 +340,50 @@ resource "aws_s3control_multi_region_access_point" "test" { } `, bucketName, multiRegionAccessPointName) } + +func testAccMultiRegionAccessPointConfig_threeRegions(bucketName1, bucketName2, bucketName3, multiRegionAccessPointName string) string { + return acctest.ConfigCompose( + acctest.ConfigMultipleRegionProvider(3), + fmt.Sprintf(` +resource "aws_s3_bucket" "test1" { + provider = aws + + bucket = %[1]q + force_destroy = true +} + +resource "aws_s3_bucket" "test2" { + provider = awsalternate + + bucket = %[2]q + force_destroy = true +} + +resource "aws_s3_bucket" "test3" { + provider = awsthird + + bucket = %[3]q + force_destroy = true +} + +resource "aws_s3control_multi_region_access_point" "test" { + provider = aws + + details { + name = %[4]q + + region { + bucket = aws_s3_bucket.test1.id + } + + region { + bucket = aws_s3_bucket.test2.id + } + + region { + bucket = aws_s3_bucket.test3.id + } + } +} +`, bucketName1, bucketName2, bucketName3, multiRegionAccessPointName)) +} From 50c2a09ea94500fe18f7d8a31063e2359bd7a404 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 12 Nov 2021 18:03:43 -0500 Subject: [PATCH 145/304] Fix golangci-lint errors. --- internal/service/s3control/multi_region_access_point.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go index bfaa90080cad..829be111286a 100644 --- a/internal/service/s3control/multi_region_access_point.go +++ b/internal/service/s3control/multi_region_access_point.go @@ -206,12 +206,8 @@ func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{} d.Set("account_id", accountID) d.Set("alias", alias) d.Set("arn", arn) - if accessPoint != nil { - if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointReport(accessPoint)}); err != nil { - return fmt.Errorf("error setting details: %w", err) - } - } else { - d.Set("details", nil) + if err := d.Set("details", []interface{}{flattenMultiRegionAccessPointReport(accessPoint)}); err != nil { + return fmt.Errorf("error setting details: %w", err) } // https://docs.aws.amazon.com/AmazonS3/latest/userguide//MultiRegionAccessPointRequests.html#MultiRegionAccessPointHostnames. 
d.Set("domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.accesspoint.s3-global", alias))) From 0910c1655a47476b9e0a98185a0206f1831c8081 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 12 Nov 2021 11:33:36 -0800 Subject: [PATCH 146/304] Naming corrections --- internal/service/appstream/directory_config.go | 18 +++++++++--------- .../service/appstream/directory_config_test.go | 4 ++-- .../r/appstream_directory_config.html.markdown | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/internal/service/appstream/directory_config.go b/internal/service/appstream/directory_config.go index 45c06ee829ba..b331d52ad7e8 100644 --- a/internal/service/appstream/directory_config.go +++ b/internal/service/appstream/directory_config.go @@ -78,11 +78,11 @@ func resourceDirectoryConfigCreate(ctx context.Context, d *schema.ResourceData, output, err := conn.CreateDirectoryConfigWithContext(ctx, input) if err != nil { - return diag.FromErr(fmt.Errorf("error creating Appstream DirectoryConfig (%s): %w", directoryName, err)) + return diag.FromErr(fmt.Errorf("error creating AppStream Directory Config (%s): %w", directoryName, err)) } if output == nil || output.DirectoryConfig == nil { - return diag.Errorf("error creating AppStream DirectoryConfig (%s): empty response", directoryName) + return diag.Errorf("error creating AppStream Directory Config (%s): empty response", directoryName) } d.SetId(aws.StringValue(output.DirectoryConfig.DirectoryName)) @@ -96,21 +96,21 @@ func resourceDirectoryConfigRead(ctx context.Context, d *schema.ResourceData, me resp, err := conn.DescribeDirectoryConfigsWithContext(ctx, &appstream.DescribeDirectoryConfigsInput{DirectoryNames: []*string{aws.String(d.Id())}}) if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] Appstream DirectoryConfig (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] AppStream Directory Config (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return diag.FromErr(fmt.Errorf("error reading Appstream DirectoryConfig (%s): %w", d.Id(), err)) + return diag.FromErr(fmt.Errorf("error reading AppStream Directory Config (%s): %w", d.Id(), err)) } if len(resp.DirectoryConfigs) == 0 { - return diag.FromErr(fmt.Errorf("error reading Appstream DirectoryConfig (%s): %s", d.Id(), "empty response")) + return diag.FromErr(fmt.Errorf("error reading AppStream Directory Config (%s): %s", d.Id(), "empty response")) } if len(resp.DirectoryConfigs) > 1 { - return diag.FromErr(fmt.Errorf("error reading Appstream DirectoryConfig (%s): %s", d.Id(), "multiple directories config found")) + return diag.FromErr(fmt.Errorf("error reading AppStream Directory Config (%s): %s", d.Id(), "multiple Directory Configs found")) } directoryConfig := resp.DirectoryConfigs[0] @@ -120,7 +120,7 @@ func resourceDirectoryConfigRead(ctx context.Context, d *schema.ResourceData, me d.Set("organizational_unit_distinguished_names", flex.FlattenStringSet(directoryConfig.OrganizationalUnitDistinguishedNames)) if err = d.Set("service_account_credentials", flattenServiceAccountCredentials(directoryConfig.ServiceAccountCredentials, d)); err != nil { - return diag.FromErr(fmt.Errorf("error setting `%s` for AppStream DirectoryConfig (%s): %w", "service_account_credentials", d.Id(), err)) + return diag.FromErr(fmt.Errorf("error setting `%s` for AppStream Directory Config (%s): %w", "service_account_credentials", d.Id(), err)) } return nil @@ -142,7 
+142,7 @@ func resourceDirectoryConfigUpdate(ctx context.Context, d *schema.ResourceData, _, err := conn.UpdateDirectoryConfigWithContext(ctx, input) if err != nil { - return diag.FromErr(fmt.Errorf("error updating Appstream DirectoryConfig (%s): %w", d.Id(), err)) + return diag.FromErr(fmt.Errorf("error updating AppStream Directory Config (%s): %w", d.Id(), err)) } return resourceDirectoryConfigRead(ctx, d, meta) @@ -159,7 +159,7 @@ func resourceDirectoryConfigDelete(ctx context.Context, d *schema.ResourceData, if tfawserr.ErrCodeEquals(err, appstream.ErrCodeResourceNotFoundException) { return nil } - return diag.FromErr(fmt.Errorf("error deleting Appstream DirectoryConfig (%s): %w", d.Id(), err)) + return diag.FromErr(fmt.Errorf("error deleting AppStream Directory Config (%s): %w", d.Id(), err)) } return nil } diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go index 654d7c824a43..6c9ddfaf14ee 100644 --- a/internal/service/appstream/directory_config_test.go +++ b/internal/service/appstream/directory_config_test.go @@ -102,7 +102,7 @@ func testAccCheckDirectoryConfigExists(resourceName string, appStreamDirectoryCo } if resp == nil && len(resp.DirectoryConfigs) == 0 { - return fmt.Errorf("appstream directory config %q does not exist", rs.Primary.ID) + return fmt.Errorf("AppStream Directory Config %q does not exist", rs.Primary.ID) } *appStreamDirectoryConfig = *resp.DirectoryConfigs[0] @@ -130,7 +130,7 @@ func testAccCheckDirectoryConfigDestroy(s *terraform.State) error { } if resp != nil && len(resp.DirectoryConfigs) > 0 { - return fmt.Errorf("appstream directory config %q still exists", rs.Primary.ID) + return fmt.Errorf("AppStream Directory Config %q still exists", rs.Primary.ID) } } diff --git a/website/docs/r/appstream_directory_config.html.markdown b/website/docs/r/appstream_directory_config.html.markdown index 47fcc9d5a309..1d50e27204a4 100644 --- a/website/docs/r/appstream_directory_config.html.markdown +++ b/website/docs/r/appstream_directory_config.html.markdown @@ -14,7 +14,7 @@ Provides an AppStream Directory Config. 
```terraform resource "aws_appstream_directory_config" "example" { - directory_name = "NAME OF DIRECTORY CONFIG" + directory_name = "NAME OF DIRECTORY" organizational_unit_distinguished_names = ["DISTINGUISHED NAME"] service_account_credentials { From 8be70b1b4eba4fa96cd933f3285f36c3cf009d4e Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 12 Nov 2021 18:43:06 -0500 Subject: [PATCH 147/304] support in-place update of engine_version --- internal/service/neptune/cluster.go | 7 +- internal/service/neptune/cluster_test.go | 81 ++++++++++++++++++++++++ internal/service/neptune/wait.go | 1 + 3 files changed, 88 insertions(+), 1 deletion(-) diff --git a/internal/service/neptune/cluster.go b/internal/service/neptune/cluster.go index 6351411ca0a9..ba4ab1ad96e1 100644 --- a/internal/service/neptune/cluster.go +++ b/internal/service/neptune/cluster.go @@ -140,7 +140,6 @@ func ResourceCluster() *schema.Resource { "engine_version": { Type: schema.TypeString, Optional: true, - ForceNew: true, Computed: true, }, @@ -625,11 +624,17 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool)) requestUpdate = true } + if d.HasChange("deletion_protection") { req.DeletionProtection = aws.Bool(d.Get("deletion_protection").(bool)) requestUpdate = true } + if d.HasChange("engine_version") { + req.EngineVersion = aws.String(d.Get("engine_version").(string)) + requestUpdate = true + } + if requestUpdate { err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.ModifyDBCluster(req) diff --git a/internal/service/neptune/cluster_test.go b/internal/service/neptune/cluster_test.go index a59c60278af8..32801420ea91 100644 --- a/internal/service/neptune/cluster_test.go +++ b/internal/service/neptune/cluster_test.go @@ -462,6 +462,57 @@ func TestAccNeptuneCluster_updateCloudWatchLogsExports(t *testing.T) { }) } +func TestAccNeptuneCluster_updateEngineVersion(t *testing.T) { + var dbCluster neptune.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_neptune_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, neptune.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_engineVersion(rName, "1.0.2.1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "engine_version", "1.0.2.1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "apply_immediately", + "cluster_identifier_prefix", + "final_snapshot_identifier", + "skip_final_snapshot", + }, + }, + { + Config: testAccClusterConfig_engineVersion(rName, "1.0.5.1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "engine_version", "1.0.5.1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "apply_immediately", + "cluster_identifier_prefix", + "final_snapshot_identifier", + "skip_final_snapshot", + }, + }, + }, + }) +} + func TestAccNeptuneCluster_deleteProtection(t *testing.T) { var dbCluster neptune.DBCluster rName := sdkacctest.RandomWithPrefix("tf-acc") 
@@ -1075,3 +1126,33 @@ resource "aws_neptune_cluster" "test" { } `, rName)) } + +func testAccClusterConfig_engineVersion(rName, engineVersion string) string { + return acctest.ConfigCompose(testAccClusterBaseConfig(), fmt.Sprintf(` +resource "aws_neptune_cluster" "test" { + cluster_identifier = %[1]q + apply_immediately = true + availability_zones = local.availability_zone_names + engine_version = %[2]q + neptune_cluster_parameter_group_name = "default.neptune1" + skip_final_snapshot = true +} + +data "aws_neptune_orderable_db_instance" "test" { + engine = "neptune" + engine_version = aws_neptune_cluster.test.engine_version + license_model = "amazon-license" + + preferred_instance_classes = ["db.t3.medium", "db.r5.large", "db.r4.large"] +} + +resource "aws_neptune_cluster_instance" "test" { + identifier = %[1]q + cluster_identifier = aws_neptune_cluster.test.id + apply_immediately = true + instance_class = data.aws_neptune_orderable_db_instance.test.instance_class + neptune_parameter_group_name = aws_neptune_cluster.test.neptune_cluster_parameter_group_name + promotion_tier = "3" +} +`, rName, engineVersion)) +} diff --git a/internal/service/neptune/wait.go b/internal/service/neptune/wait.go index e94d53f9ed27..b214425eb28b 100644 --- a/internal/service/neptune/wait.go +++ b/internal/service/neptune/wait.go @@ -71,6 +71,7 @@ func WaitDBClusterAvailable(conn *neptune.Neptune, id string, timeout time.Durat "preparing-data-migration", "migrating", "configuring-iam-database-auth", + "upgrading", }, Target: []string{"available"}, Refresh: StatusCluster(conn, id), From 9328f3d4e3ceb23a2bb134d5585cf2d776b44389 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 12 Nov 2021 18:46:17 -0500 Subject: [PATCH 148/304] Update CHANGELOG for #21760 --- .changelog/21760.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21760.txt diff --git a/.changelog/21760.txt b/.changelog/21760.txt new file mode 100644 index 000000000000..7ef478e146ec --- /dev/null +++ b/.changelog/21760.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_neptune_cluster: Support in-place update of `engine_version` +``` \ No newline at end of file From 1338345a3dea9afda39938184d76847dd4309a6c Mon Sep 17 00:00:00 2001 From: changelogbot Date: Sat, 13 Nov 2021 00:17:09 +0000 Subject: [PATCH 149/304] Update CHANGELOG.md for #21750 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d60018092f54..34a501a24916 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## 3.66.0 (Unreleased) +FEATURES: + +* **New Resource:** `aws_s3control_multi_region_access_point` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) +* **New Resource:** `aws_s3control_multi_region_access_point_policy` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) + ENHANCEMENTS: * resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702)) From 3454306ca9919304a6d781a2f89dec923399c0da Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 22:46:43 +0200 Subject: [PATCH 150/304] add `deployment_config` --- internal/service/sagemaker/endpoint.go | 381 ++++++++++++++++++-- internal/service/sagemaker/endpoint_test.go | 98 +++++ internal/service/sagemaker/find.go | 29 ++ 3 files changed, 472 insertions(+), 36 deletions(-) diff --git a/internal/service/sagemaker/endpoint.go b/internal/service/sagemaker/endpoint.go index 
bc97af63f5e6..9d11700f1712 100644 --- a/internal/service/sagemaker/endpoint.go +++ b/internal/service/sagemaker/endpoint.go @@ -9,8 +9,10 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -29,7 +31,121 @@ func ResourceEndpoint() *schema.Resource { Type: schema.TypeString, Computed: true, }, - + "deployment_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "blue_green_update_policy": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "maximum_execution_timeout_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(600, 14400), + }, + "termination_wait_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntBetween(0, 3600), + }, + "traffic_routing_configuration": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary_size": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(sagemaker.CapacitySizeType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + "linear_step_size": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(sagemaker.CapacitySizeType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(sagemaker.TrafficRoutingConfigType_Values(), false), + }, + "wait_interval_in_seconds": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 3600), + }, + }, + }, + }, + }, + }, + }, + "auto_rollback_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarms": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "endpoint_config_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validName, + }, "name": { Type: schema.TypeString, Optional: true, @@ -37,13 +153,6 @@ func ResourceEndpoint() *schema.Resource { ForceNew: true, ValidateFunc: validName, }, - - "endpoint_config_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validName, - }, - "tags": tftags.TagsSchema(), "tags_all": tftags.TagsSchemaComputed(), }, @@ -69,6 +178,10 @@ func 
resourceEndpointCreate(d *schema.ResourceData, meta interface{}) error { EndpointConfigName: aws.String(d.Get("endpoint_config_name").(string)), } + if v, ok := d.GetOk("deployment_config"); ok && (len(v.([]interface{})) > 0) { + createOpts.DeploymentConfig = expandEndpointDeploymentConfig(v.([]interface{})) + } + if len(tags) > 0 { createOpts.Tags = Tags(tags.IgnoreAWS()) } @@ -86,7 +199,7 @@ func resourceEndpointCreate(d *schema.ResourceData, meta interface{}) error { } if err := conn.WaitUntilEndpointInService(describeInput); err != nil { - return fmt.Errorf("error waiting for SageMaker Endpoint (%s) to be in service: %s", name, err) + return fmt.Errorf("error waiting for SageMaker Endpoint (%s) to be in service: %w", name, err) } return resourceEndpointRead(d, meta) @@ -97,34 +210,24 @@ func resourceEndpointRead(d *schema.ResourceData, meta interface{}) error { defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - describeInput := &sagemaker.DescribeEndpointInput{ - EndpointName: aws.String(d.Id()), - } + endpoint, err := FindEndpointByName(conn, d.Id()) - endpoint, err := conn.DescribeEndpoint(describeInput) - if err != nil { - if tfawserr.ErrMessageContains(err, "ValidationException", "") { - log.Printf("[INFO] unable to find the SageMaker Endpoint resource and therefore it is removed from the state: %s", d.Id()) - d.SetId("") - return nil - } - return err - } - if aws.StringValue(endpoint.EndpointStatus) == sagemaker.EndpointStatusDeleting { - log.Printf("[WARN] SageMaker Endpoint (%s) is deleting, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] SageMaker Endpoint (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - if err := d.Set("name", endpoint.EndpointName); err != nil { - return err - } - if err := d.Set("endpoint_config_name", endpoint.EndpointConfigName); err != nil { - return err + if err != nil { + return fmt.Errorf("error reading SageMaker Endpoint (%s): %w", d.Id(), err) } - if err := d.Set("arn", endpoint.EndpointArn); err != nil { - return err + d.Set("name", endpoint.EndpointName) + d.Set("endpoint_config_name", endpoint.EndpointConfigName) + d.Set("arn", endpoint.EndpointArn) + + if err := d.Set("deployment_config", flattenEndpointDeploymentConfig(endpoint.LastDeploymentConfig)); err != nil { + return fmt.Errorf("error setting deployment_config for SageMaker Endpoint (%s): %w", d.Id(), err) } tags, err := ListTags(conn, aws.StringValue(endpoint.EndpointArn)) @@ -153,19 +256,23 @@ func resourceEndpointUpdate(d *schema.ResourceData, meta interface{}) error { o, n := d.GetChange("tags_all") if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating Sagemaker Endpoint (%s) tags: %s", d.Id(), err) + return fmt.Errorf("error updating Sagemaker Endpoint (%s) tags: %w", d.Id(), err) } } - if d.HasChange("endpoint_config_name") { + if d.HasChanges("endpoint_config_name", "deployment_config") { modifyOpts := &sagemaker.UpdateEndpointInput{ EndpointName: aws.String(d.Id()), EndpointConfigName: aws.String(d.Get("endpoint_config_name").(string)), } + if v, ok := d.GetOk("deployment_config"); ok && (len(v.([]interface{})) > 0) { + modifyOpts.DeploymentConfig = expandEndpointDeploymentConfig(v.([]interface{})) + } + log.Printf("[INFO] Modifying endpoint_config_name attribute for %s: %#v", d.Id(), modifyOpts) if _, err := conn.UpdateEndpoint(modifyOpts); err != nil { - return 
fmt.Errorf("error updating SageMaker Endpoint (%s): %s", d.Id(), err) + return fmt.Errorf("error updating SageMaker Endpoint (%s): %w", d.Id(), err) } describeInput := &sagemaker.DescribeEndpointInput{ @@ -174,7 +281,7 @@ func resourceEndpointUpdate(d *schema.ResourceData, meta interface{}) error { err := conn.WaitUntilEndpointInService(describeInput) if err != nil { - return fmt.Errorf("error waiting for SageMaker Endpoint (%s) to be in service: %s", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker Endpoint (%s) to be in service: %w", d.Id(), err) } } @@ -196,7 +303,7 @@ func resourceEndpointDelete(d *schema.ResourceData, meta interface{}) error { } if err != nil { - return fmt.Errorf("error deleting SageMaker Endpoint (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting SageMaker Endpoint (%s): %w", d.Id(), err) } describeInput := &sagemaker.DescribeEndpointInput{ @@ -204,8 +311,210 @@ func resourceEndpointDelete(d *schema.ResourceData, meta interface{}) error { } if err := conn.WaitUntilEndpointDeleted(describeInput); err != nil { - return fmt.Errorf("error waiting for SageMaker Endpoint (%s) to be deleted: %s", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker Endpoint (%s) to be deleted: %w", d.Id(), err) } return nil } + +func expandEndpointDeploymentConfig(configured []interface{}) *sagemaker.DeploymentConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.DeploymentConfig{ + BlueGreenUpdatePolicy: expandEndpointDeploymentConfigBlueGreenUpdatePolicy(m["blue_green_update_policy"].([]interface{})), + } + + if v, ok := m["auto_rollback_configuration"].([]interface{}); ok && len(v) > 0 { + c.AutoRollbackConfiguration = expandEndpointDeploymentConfigAutoRollbackConfig(v) + } + + return c +} + +func flattenEndpointDeploymentConfig(configured *sagemaker.DeploymentConfig) []map[string]interface{} { + if configured == nil { + return []map[string]interface{}{} + } + + cfg := map[string]interface{}{ + "blue_green_update_policy": flattenEndpointDeploymentConfigBlueGreenUpdatePolicy(configured.BlueGreenUpdatePolicy), + } + + if configured.AutoRollbackConfiguration != nil { + cfg["auto_rollback_configuration"] = flattenEndpointDeploymentConfigAutoRollbackConfig(configured.AutoRollbackConfiguration) + } + + return []map[string]interface{}{cfg} +} + +func expandEndpointDeploymentConfigBlueGreenUpdatePolicy(configured []interface{}) *sagemaker.BlueGreenUpdatePolicy { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.BlueGreenUpdatePolicy{ + TerminationWaitInSeconds: aws.Int64(int64(m["termination_wait_in_seconds"].(int))), + TrafficRoutingConfiguration: expandEndpointDeploymentConfigTrafficRoutingConfiguration(m["traffic_routing_configuration"].([]interface{})), + } + + if v, ok := m["maximum_execution_timeout_in_seconds"].(int); ok && v > 0 { + c.MaximumExecutionTimeoutInSeconds = aws.Int64(int64(v)) + } + + return c +} + +func flattenEndpointDeploymentConfigBlueGreenUpdatePolicy(configured *sagemaker.BlueGreenUpdatePolicy) []map[string]interface{} { + if configured == nil { + return []map[string]interface{}{} + } + + cfg := map[string]interface{}{ + "termination_wait_in_seconds": aws.Int64Value(configured.TerminationWaitInSeconds), + "traffic_routing_configuration": flattenEndpointDeploymentConfigTrafficRoutingConfiguration(configured.TrafficRoutingConfiguration), + } + + if configured.MaximumExecutionTimeoutInSeconds != nil { + 
cfg["maximum_execution_timeout_in_seconds"] = aws.Int64Value(configured.MaximumExecutionTimeoutInSeconds) + } + + return []map[string]interface{}{cfg} +} + +func expandEndpointDeploymentConfigTrafficRoutingConfiguration(configured []interface{}) *sagemaker.TrafficRoutingConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.TrafficRoutingConfig{ + Type: aws.String(m["type"].(string)), + WaitIntervalInSeconds: aws.Int64(int64(m["wait_interval_in_seconds"].(int))), + } + + if v, ok := m["canary_size"].([]interface{}); ok && len(v) > 0 { + c.CanarySize = expandEndpointDeploymentConfigTrafficRoutingConfigurationCapacitySize(v) + } + + if v, ok := m["linear_step_size"].([]interface{}); ok && len(v) > 0 { + c.LinearStepSize = expandEndpointDeploymentConfigTrafficRoutingConfigurationCapacitySize(v) + } + + return c +} + +func flattenEndpointDeploymentConfigTrafficRoutingConfiguration(configured *sagemaker.TrafficRoutingConfig) []map[string]interface{} { + if configured == nil { + return []map[string]interface{}{} + } + + cfg := map[string]interface{}{ + "type": aws.StringValue(configured.Type), + "wait_interval_in_seconds": aws.Int64Value(configured.WaitIntervalInSeconds), + } + + if configured.CanarySize != nil { + cfg["canary_size"] = flattenEndpointDeploymentConfigTrafficRoutingConfigurationCapacitySize(configured.CanarySize) + } + + if configured.LinearStepSize != nil { + cfg["linear_step_size"] = flattenEndpointDeploymentConfigTrafficRoutingConfigurationCapacitySize(configured.LinearStepSize) + } + + return []map[string]interface{}{cfg} +} + +func expandEndpointDeploymentConfigTrafficRoutingConfigurationCapacitySize(configured []interface{}) *sagemaker.CapacitySize { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.CapacitySize{ + Type: aws.String(m["type"].(string)), + Value: aws.Int64(int64(m["value"].(int))), + } + + return c +} + +func flattenEndpointDeploymentConfigTrafficRoutingConfigurationCapacitySize(configured *sagemaker.CapacitySize) []map[string]interface{} { + if configured == nil { + return []map[string]interface{}{} + } + + cfg := map[string]interface{}{ + "type": aws.StringValue(configured.Type), + "value": aws.Int64Value(configured.Value), + } + + return []map[string]interface{}{cfg} +} + +func expandEndpointDeploymentConfigAutoRollbackConfig(configured []interface{}) *sagemaker.AutoRollbackConfig { + if len(configured) == 0 { + return nil + } + + m := configured[0].(map[string]interface{}) + + c := &sagemaker.AutoRollbackConfig{ + Alarms: expandEndpointDeploymentConfigAutoRollbackConfigAlarms(m["alarms"].([]interface{})), + } + + return c +} + +func flattenEndpointDeploymentConfigAutoRollbackConfig(configured *sagemaker.AutoRollbackConfig) []map[string]interface{} { + if configured == nil { + return []map[string]interface{}{} + } + + cfg := map[string]interface{}{ + "alarms": flattenEndpointDeploymentConfigAutoRollbackConfigAlarms(configured.Alarms), + } + + return []map[string]interface{}{cfg} +} + +func expandEndpointDeploymentConfigAutoRollbackConfigAlarms(configured []interface{}) []*sagemaker.Alarm { + if len(configured) == 0 { + return nil + } + + alarms := make([]*sagemaker.Alarm, 0, len(configured)) + + for _, alarmRaw := range configured { + + m := alarmRaw.(map[string]interface{}) + + alarm := &sagemaker.Alarm{ + AlarmName: aws.String(m["alarm_name"].(string)), + } + + alarms = append(alarms, alarm) + } + + return alarms +} + +func 
flattenEndpointDeploymentConfigAutoRollbackConfigAlarms(configured []*sagemaker.Alarm) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(configured)) + + for _, i := range configured { + l := map[string]interface{}{ + "alarm_name": aws.StringValue(i.AlarmName), + } + + result = append(result, l) + } + return result +} diff --git a/internal/service/sagemaker/endpoint_test.go b/internal/service/sagemaker/endpoint_test.go index 8dc19d2ec5d7..42cf0b1882b3 100644 --- a/internal/service/sagemaker/endpoint_test.go +++ b/internal/service/sagemaker/endpoint_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfsagemaker "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" ) func TestAccSageMakerEndpoint_basic(t *testing.T) { @@ -29,7 +30,10 @@ func TestAccSageMakerEndpoint_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckSagemakerEndpointExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("endpoint/%s", rName)), resource.TestCheckResourceAttr(resourceName, "endpoint_config_name", rName), + resource.TestCheckResourceAttr(resourceName, "deployment_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -112,6 +116,82 @@ func TestAccSageMakerEndpoint_tags(t *testing.T) { }) } +func TestAccSageMakerEndpoint_deploymentConfig(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_endpoint.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckSagemakerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSagemakerEndpointDeploymentBasicConfig(rName, "ALL_AT_ONCE", 60), + Check: resource.ComposeTestCheckFunc( + testAccCheckSagemakerEndpointExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "deployment_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.auto_rollback_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.termination_wait_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.type", "ALL_AT_ONCE"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.wait_interval_in_seconds", "60"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.canary_size.#", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.linear_step_size.#", 
"0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + // { + // Config: testAccSagemakerEndpointDeploymentBasicConfig(rName, "LINEAR", 120), + // Check: resource.ComposeTestCheckFunc( + // testAccCheckSagemakerEndpointExists(resourceName), + // resource.TestCheckResourceAttr(resourceName, "name", rName), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.auto_rollback_configuration.#", "0"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.termination_wait_in_seconds", "0"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.type", "LINEAR"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.wait_interval_in_seconds", "120"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.canary_size.#", "0"), + // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.linear_step_size.#", "0"), + // ), + // }, + }, + }) +} + +func TestAccSageMakerEndpoint_disappears(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_endpoint.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckSagemakerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSagemakerEndpointConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckSagemakerEndpointExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfsagemaker.ResourceEndpoint(), resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfsagemaker.ResourceEndpoint(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckSagemakerEndpointDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn @@ -306,3 +386,21 @@ resource "aws_sagemaker_endpoint" "test" { } `, rName) } + +func testAccSagemakerEndpointDeploymentBasicConfig(rName, tType string, wait int) string { + return testAccSagemakerEndpointConfig_Base(rName) + fmt.Sprintf(` +resource "aws_sagemaker_endpoint" "test" { + endpoint_config_name = aws_sagemaker_endpoint_configuration.test.name + name = %[1]q + + deployment_config { + blue_green_update_policy { + traffic_routing_configuration { + type = %[2]q + wait_interval_in_seconds = %[3]d + } + } + } +} +`, rName, tType, wait) +} diff --git a/internal/service/sagemaker/find.go b/internal/service/sagemaker/find.go index 67be5a8eb1db..14474c6d9846 100644 --- a/internal/service/sagemaker/find.go +++ b/internal/service/sagemaker/find.go @@ -325,6 +325,35 @@ func FindHumanTaskUIByName(conn *sagemaker.SageMaker, name string) (*sagemaker.D return 
output, nil } +func FindEndpointByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeEndpointOutput, error) { + input := &sagemaker.DescribeEndpointInput{ + EndpointName: aws.String(name), + } + + output, err := conn.DescribeEndpoint(input) + + if tfawserr.ErrMessageContains(err, ErrCodeValidationException, "Could not find endpoint") { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + if aws.StringValue(output.EndpointStatus) == sagemaker.EndpointStatusDeleting { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func FindEndpointConfigByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeEndpointConfigOutput, error) { input := &sagemaker.DescribeEndpointConfigInput{ EndpointConfigName: aws.String(name), From 7df8ac5d7fa90925d6398455c17ee7cff56eb250 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 22:48:36 +0200 Subject: [PATCH 151/304] use finder --- internal/service/sagemaker/endpoint_test.go | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/internal/service/sagemaker/endpoint_test.go b/internal/service/sagemaker/endpoint_test.go index 42cf0b1882b3..1e447448a960 100644 --- a/internal/service/sagemaker/endpoint_test.go +++ b/internal/service/sagemaker/endpoint_test.go @@ -4,15 +4,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfsagemaker "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccSageMakerEndpoint_basic(t *testing.T) { @@ -200,13 +199,9 @@ func testAccCheckSagemakerEndpointDestroy(s *terraform.State) error { continue } - describeInput := &sagemaker.DescribeEndpointInput{ - EndpointName: aws.String(rs.Primary.ID), - } - - _, err := conn.DescribeEndpoint(describeInput) + _, err := tfsagemaker.FindEndpointByName(conn, rs.Primary.ID) - if tfawserr.ErrMessageContains(err, "ValidationException", "") { + if tfresource.NotFound(err) { continue } @@ -231,10 +226,7 @@ func testAccCheckSagemakerEndpointExists(n string) resource.TestCheckFunc { } conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerConn - opts := &sagemaker.DescribeEndpointInput{ - EndpointName: aws.String(rs.Primary.ID), - } - _, err := conn.DescribeEndpoint(opts) + _, err := tfsagemaker.FindEndpointByName(conn, rs.Primary.ID) if err != nil { return err } From 70fd75e60a5b01ce1e615afc6729e5d9381e80c5 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 23:16:24 +0200 Subject: [PATCH 152/304] docs --- .../docs/r/sagemaker_endpoint.html.markdown | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/website/docs/r/sagemaker_endpoint.html.markdown b/website/docs/r/sagemaker_endpoint.html.markdown index cd298aa8a3fb..397fe6f9ee6c 100644 --- a/website/docs/r/sagemaker_endpoint.html.markdown +++ b/website/docs/r/sagemaker_endpoint.html.markdown @@ -30,9 +30,46 @@ resource "aws_sagemaker_endpoint" "e" { The 
following arguments are supported:

* `endpoint_config_name` - (Required) The name of the endpoint configuration to use.
+* `deployment_config` - (Optional) The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. See [Deployment Config](#deployment-config).
* `name` - (Optional) The name of the endpoint. If omitted, Terraform will assign a random, unique name.
* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+### Deployment Config
+
+* `blue_green_update_policy` - (Required) Update policy for a blue/green deployment. If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. See [Blue Green Update Config](#blue-green-update-config).
+* `auto_rollback_configuration` - (Optional) Automatic rollback configuration for handling endpoint deployment failures and recovery. See [Auto Rollback Configuration](#auto-rollback-configuration).
+
+#### Blue Green Update Config
+
+* `traffic_routing_configuration` - (Required) Defines the traffic routing strategy to shift traffic from the old fleet to the new fleet during an endpoint deployment. See [Traffic Routing Configuration](#traffic-routing-configuration).
+* `maximum_execution_timeout_in_seconds` - (Optional) Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in `termination_wait_in_seconds` and `wait_interval_in_seconds`. Valid values are between `600` and `14400`.
+* `termination_wait_in_seconds` - (Optional) Additional waiting time in seconds after the completion of an endpoint deployment before terminating the old endpoint fleet. Default is `0`. Valid values are between `0` and `3600`.
+
+##### Traffic Routing Configuration
+
+* `type` - (Required) Traffic routing strategy type. Valid values are: `ALL_AT_ONCE`, `CANARY`, and `LINEAR`.
+* `wait_interval_in_seconds` - (Required) The waiting time (in seconds) between incremental steps to turn on traffic on the new endpoint fleet. Valid values are between `0` and `3600`.
+* `canary_size` - (Optional) Batch size for the first step to turn on traffic on the new endpoint fleet. Value must be less than or equal to 50% of the variant's total instance count. See [Canary Size](#canary-size).
+* `linear_step_size` - (Optional) Batch size for each step to turn on traffic on the new endpoint fleet. Value must be 10-50% of the variant's total instance count. See [Linear Step Size](#linear-step-size).
+
+###### Canary Size
+
+* `type` - (Required) Specifies the endpoint capacity type. Valid values are `INSTANCE_COUNT` or `CAPACITY_PERCENT`.
+* `value` - (Required) Defines the capacity size, either as a number of instances or a capacity percentage.
+
+###### Linear Step Size
+
+* `type` - (Required) Specifies the endpoint capacity type. Valid values are `INSTANCE_COUNT` or `CAPACITY_PERCENT`.
+* `value` - (Required) Defines the capacity size, either as a number of instances or a capacity percentage.
+
+#### Auto Rollback Configuration
+
+* `alarms` - (Optional) List of CloudWatch alarms in your account that are configured to monitor metrics on an endpoint. If any alarms are tripped during a deployment, SageMaker rolls back the deployment. See [Alarms](#alarms).
+ +##### Alarms + +* `alarm_name` - (Required) The name of a CloudWatch alarm in your account. + ## Attributes Reference In addition to all arguments above, the following attributes are exported: From 10edb575f69d239b61528217d9ed363850a45dff Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 23:25:53 +0200 Subject: [PATCH 153/304] add more tests --- internal/service/sagemaker/endpoint.go | 48 ++++----- internal/service/sagemaker/endpoint_test.go | 102 ++++++++++++++++---- 2 files changed, 109 insertions(+), 41 deletions(-) diff --git a/internal/service/sagemaker/endpoint.go b/internal/service/sagemaker/endpoint.go index 9d11700f1712..99276d3bbcb0 100644 --- a/internal/service/sagemaker/endpoint.go +++ b/internal/service/sagemaker/endpoint.go @@ -37,6 +37,30 @@ func ResourceEndpoint() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "auto_rollback_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarms": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, "blue_green_update_policy": { Type: schema.TypeList, Required: true, @@ -114,30 +138,6 @@ func ResourceEndpoint() *schema.Resource { }, }, }, - "auto_rollback_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alarms": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alarm_name": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, }, }, }, diff --git a/internal/service/sagemaker/endpoint_test.go b/internal/service/sagemaker/endpoint_test.go index 1e447448a960..0e379de75c0a 100644 --- a/internal/service/sagemaker/endpoint_test.go +++ b/internal/service/sagemaker/endpoint_test.go @@ -147,23 +147,45 @@ func TestAccSageMakerEndpoint_deploymentConfig(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - // { - // Config: testAccSagemakerEndpointDeploymentBasicConfig(rName, "LINEAR", 120), - // Check: resource.ComposeTestCheckFunc( - // testAccCheckSagemakerEndpointExists(resourceName), - // resource.TestCheckResourceAttr(resourceName, "name", rName), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.auto_rollback_configuration.#", "0"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.termination_wait_in_seconds", "0"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.type", "LINEAR"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.wait_interval_in_seconds", "120"), - // 
resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.canary_size.#", "0"), - // resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.linear_step_size.#", "0"), - // ), - // }, + }, + }) +} + +func TestAccSageMakerEndpoint_deploymentConfig_full(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_endpoint.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, sagemaker.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckSagemakerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSagemakerEndpointDeploymentFullConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckSagemakerEndpointExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "deployment_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.auto_rollback_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.auto_rollback_configuration.0.alarms.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.termination_wait_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.type", "LINEAR"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.wait_interval_in_seconds", "60"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.canary_size.#", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.linear_step_size.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.linear_step_size.0.type", "INSTANCE_COUNT"), + resource.TestCheckResourceAttr(resourceName, "deployment_config.0.blue_green_update_policy.0.traffic_routing_configuration.0.linear_step_size.0.value", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -396,3 +418,49 @@ resource "aws_sagemaker_endpoint" "test" { } `, rName, tType, wait) } + +func testAccSagemakerEndpointDeploymentFullConfig(rName string) string { + return testAccSagemakerEndpointConfig_Base(rName) + fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "test" { + alarm_name = %[1]q + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = "2" + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = "120" + statistic = "Average" + threshold = "80" + alarm_description = "This metric monitors ec2 cpu utilization" + insufficient_data_actions = [] + + dimensions = { + InstanceId = "i-abc123" + } +} + +resource "aws_sagemaker_endpoint" "test" { 
+ endpoint_config_name = aws_sagemaker_endpoint_configuration.test.name + name = %[1]q + + deployment_config { + blue_green_update_policy { + traffic_routing_configuration { + type = "LINEAR" + wait_interval_in_seconds = "60" + + linear_step_size { + type = "INSTANCE_COUNT" + value = 1 + } + } + } + + auto_rollback_configuration { + alarms { + alarm_name = aws_cloudwatch_metric_alarm.test.alarm_name + } + } + } +} +`, rName) +} From 81cb8d598096c733574332df358802d2758661ad Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 23:26:35 +0200 Subject: [PATCH 154/304] changelog --- .changelog/21765.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21765.txt diff --git a/.changelog/21765.txt b/.changelog/21765.txt new file mode 100644 index 000000000000..a9a8b1077cf6 --- /dev/null +++ b/.changelog/21765.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sagemaker_endpoint: Add `deployment_config` argument +``` \ No newline at end of file From b94bf362c20649a30f6ded3b0efe94a01b86e671 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 23:33:41 +0200 Subject: [PATCH 155/304] fix test --- internal/service/sagemaker/endpoint.go | 2 +- internal/service/sagemaker/endpoint_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/sagemaker/endpoint.go b/internal/service/sagemaker/endpoint.go index 99276d3bbcb0..cbbf0a4d4b6b 100644 --- a/internal/service/sagemaker/endpoint.go +++ b/internal/service/sagemaker/endpoint.go @@ -467,7 +467,7 @@ func expandEndpointDeploymentConfigAutoRollbackConfig(configured []interface{}) m := configured[0].(map[string]interface{}) c := &sagemaker.AutoRollbackConfig{ - Alarms: expandEndpointDeploymentConfigAutoRollbackConfigAlarms(m["alarms"].([]interface{})), + Alarms: expandEndpointDeploymentConfigAutoRollbackConfigAlarms(m["alarms"].(*schema.Set).List()), } return c diff --git a/internal/service/sagemaker/endpoint_test.go b/internal/service/sagemaker/endpoint_test.go index 0e379de75c0a..24e5994cee75 100644 --- a/internal/service/sagemaker/endpoint_test.go +++ b/internal/service/sagemaker/endpoint_test.go @@ -335,7 +335,7 @@ resource "aws_sagemaker_endpoint_configuration" "test" { name = %[1]q production_variants { - initial_instance_count = 1 + initial_instance_count = 2 initial_variant_weight = 1 instance_type = "ml.t2.medium" model_name = aws_sagemaker_model.test.name From 62fa96128fc2cb3d5065741c3d1a362bdc60e1af Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 23:35:23 +0200 Subject: [PATCH 156/304] fmt --- internal/service/sagemaker/endpoint_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/service/sagemaker/endpoint_test.go b/internal/service/sagemaker/endpoint_test.go index 24e5994cee75..cf23508ea2d5 100644 --- a/internal/service/sagemaker/endpoint_test.go +++ b/internal/service/sagemaker/endpoint_test.go @@ -444,22 +444,22 @@ resource "aws_sagemaker_endpoint" "test" { deployment_config { blue_green_update_policy { - traffic_routing_configuration { + traffic_routing_configuration { type = "LINEAR" wait_interval_in_seconds = "60" - linear_step_size { + linear_step_size { type = "INSTANCE_COUNT" value = 1 - } - } - } + } + } + } - auto_rollback_configuration { + auto_rollback_configuration { alarms { alarm_name = aws_cloudwatch_metric_alarm.test.alarm_name - } - } + } + } } } `, rName) From 6ff126fc6afcd51180f2ea71a1bc6999f28145d9 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 13 Nov 2021 23:38:14 +0200 
Subject: [PATCH 157/304] fmt --- internal/service/sagemaker/endpoint_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/sagemaker/endpoint_test.go b/internal/service/sagemaker/endpoint_test.go index cf23508ea2d5..791a1bc82eba 100644 --- a/internal/service/sagemaker/endpoint_test.go +++ b/internal/service/sagemaker/endpoint_test.go @@ -409,11 +409,11 @@ resource "aws_sagemaker_endpoint" "test" { deployment_config { blue_green_update_policy { - traffic_routing_configuration { + traffic_routing_configuration { type = %[2]q wait_interval_in_seconds = %[3]d - } - } + } + } } } `, rName, tType, wait) From ddcebab86bca12a585c6e93c97263da67e4e47f3 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 14 Nov 2021 01:22:24 +0200 Subject: [PATCH 158/304] add emr release label data source --- internal/provider/provider.go | 2 + .../service/emr/release_labels_data_source.go | 87 +++++++++++++ .../emr/release_labels_data_source_test.go | 122 ++++++++++++++++++ 3 files changed, 211 insertions(+) create mode 100644 internal/service/emr/release_labels_data_source.go create mode 100644 internal/service/emr/release_labels_data_source_test.go diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 3999b1ea157f..041b7f1e009b 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -518,6 +518,8 @@ func Provider() *schema.Provider { "aws_lb_target_group": elbv2.DataSourceTargetGroup(), "aws_lb": elbv2.DataSourceLoadBalancer(), + "aws_emr_release_labels": emr.DataSourceReleaseLabels(), + "aws_kinesis_firehose_delivery_stream": firehose.DataSourceDeliveryStream(), "aws_globalaccelerator_accelerator": globalaccelerator.DataSourceAccelerator(), diff --git a/internal/service/emr/release_labels_data_source.go b/internal/service/emr/release_labels_data_source.go new file mode 100644 index 000000000000..b863bac55849 --- /dev/null +++ b/internal/service/emr/release_labels_data_source.go @@ -0,0 +1,87 @@ +package emr + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" +) + +func DataSourceReleaseLabels() *schema.Resource { + return &schema.Resource{ + ReadWithoutTimeout: dataSourceReleaseLabelsRead, + Schema: map[string]*schema.Schema{ + "filters": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application": { + Type: schema.TypeString, + Optional: true, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "release_labels": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func dataSourceReleaseLabelsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).EMRConn + + input := &emr.ListReleaseLabelsInput{} + + if v, ok := d.GetOk("filters"); ok && len(v.([]interface{})) > 0 { + input.Filters = expandReleaseLabelsFilters(v.([]interface{})) + } + + out, err := conn.ListReleaseLabels(input) + if err != nil { + return diag.FromErr(fmt.Errorf("error reading EMR Release Label: %w", err)) + } + + if len(out.ReleaseLabels) == 0 { + return diag.Errorf("no EMR release labels found") + } + + 
d.SetId(strings.Join(aws.StringValueSlice(out.ReleaseLabels), ",")) + d.Set("release_labels", flex.FlattenStringSet(out.ReleaseLabels)) + + return nil +} + +func expandReleaseLabelsFilters(filters []interface{}) *emr.ReleaseLabelFilter { + if len(filters) == 0 || filters[0] == nil { + return nil + } + + m := filters[0].(map[string]interface{}) + app := &emr.ReleaseLabelFilter{} + + if v, ok := m["application"].(string); ok && v != "" { + app.Application = aws.String(v) + } + + if v, ok := m["prefix"].(string); ok && v != "" { + app.Prefix = aws.String(v) + } + + return app +} diff --git a/internal/service/emr/release_labels_data_source_test.go b/internal/service/emr/release_labels_data_source_test.go new file mode 100644 index 000000000000..f66072e7e584 --- /dev/null +++ b/internal/service/emr/release_labels_data_source_test.go @@ -0,0 +1,122 @@ +package emr_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +func TestAccEMRReleaseLabels_basic(t *testing.T) { + dataSourceResourceName := "data.aws_emr_release_labels.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, emr.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccReleaseLabelsDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceResourceName, "release_labels.#"), + ), + }, + }, + }) +} + +func TestAccEMRReleaseLabels_prefix(t *testing.T) { + dataSourceResourceName := "data.aws_emr_release_labels.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, emr.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccReleaseLabelsDataSourceConfigPrefix(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceResourceName, "release_labels.#"), + ), + }, + }, + }) +} + +func TestAccEMRReleaseLabels_application(t *testing.T) { + dataSourceResourceName := "data.aws_emr_release_labels.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, emr.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccReleaseLabelsDataSourceConfigApplication(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceResourceName, "release_labels.#"), + ), + }, + }, + }) +} + +func TestAccEMRReleaseLabels_full(t *testing.T) { + dataSourceResourceName := "data.aws_emr_release_labels.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, emr.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccReleaseLabelsDataSourceConfigFull(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceResourceName, "release_labels.#"), + ), + }, + }, + }) +} + +func testAccReleaseLabelsDataSourceConfigBasic() string { + return ` +data "aws_emr_release_labels" "test" {} +` +} + +func testAccReleaseLabelsDataSourceConfigPrefix() string { + return ` +data 
"aws_emr_release_labels" "test" { + filters { + prefix = "emr-6" + } +} +` +} + +func testAccReleaseLabelsDataSourceConfigApplication() string { + return ` +data "aws_emr_release_labels" "test" { + filters { + application = "spark@2.1.0" + } +} +` +} + +func testAccReleaseLabelsDataSourceConfigFull() string { + return ` +data "aws_emr_release_labels" "test" { + filters { + application = "spark@2.1.0" + prefix = "emr-5" + } +} +` +} From 6087fa02bbe10ce79718c84dfee54b757970f217 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 14 Nov 2021 01:27:30 +0200 Subject: [PATCH 159/304] docs --- .../emr/release_labels_data_source_test.go | 2 +- website/docs/d/emr_release_labels.markdown | 37 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 website/docs/d/emr_release_labels.markdown diff --git a/internal/service/emr/release_labels_data_source_test.go b/internal/service/emr/release_labels_data_source_test.go index f66072e7e584..252db44cc7af 100644 --- a/internal/service/emr/release_labels_data_source_test.go +++ b/internal/service/emr/release_labels_data_source_test.go @@ -115,7 +115,7 @@ func testAccReleaseLabelsDataSourceConfigFull() string { data "aws_emr_release_labels" "test" { filters { application = "spark@2.1.0" - prefix = "emr-5" + prefix = "emr-5" } } ` diff --git a/website/docs/d/emr_release_labels.markdown b/website/docs/d/emr_release_labels.markdown new file mode 100644 index 000000000000..95571abbb7a4 --- /dev/null +++ b/website/docs/d/emr_release_labels.markdown @@ -0,0 +1,37 @@ +--- +subcategory: "EMR" +layout: "aws" +page_title: "AWS: aws_emr_release_labels" +description: |- + Retrieve information about EMR Release Labels +--- + +# Data Source: aws_emr_release_labels + +Retrieve information about EMR Release Labels. + +## Example Usage + +```terraform +data "aws_emr_release_labels" "example" { + filters { + application = "spark@2.1.0" + prefix = "emr-5" + } +} +``` + +## Argument Reference + +* `filters` – (Optional) Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return. See [Filters](#filters). + +### Filters + +* `application` - (Optional) Optional release label application filter. For example, `spark@2.1.0`. +* `prefix` - (Optional) Optional release label version prefix filter. For example, `emr-5`. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `release_labels` - The returned release labels. 
From c132528c3b4c5cec3943bbdfde13bcac5167f993 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 14 Nov 2021 01:29:16 +0200 Subject: [PATCH 160/304] changelog --- .changelog/21767.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21767.txt diff --git a/.changelog/21767.txt b/.changelog/21767.txt new file mode 100644 index 000000000000..cefde6142f70 --- /dev/null +++ b/.changelog/21767.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_emr_release_labels +``` \ No newline at end of file From 8b8d409c39b4b20fe87bb6aba325f07497daf83f Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 14 Nov 2021 01:43:33 +0200 Subject: [PATCH 161/304] fix filter --- internal/service/emr/release_labels_data_source_test.go | 6 +++--- website/docs/d/emr_release_labels.markdown | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/emr/release_labels_data_source_test.go b/internal/service/emr/release_labels_data_source_test.go index 252db44cc7af..ca2836f04a0f 100644 --- a/internal/service/emr/release_labels_data_source_test.go +++ b/internal/service/emr/release_labels_data_source_test.go @@ -104,7 +104,7 @@ func testAccReleaseLabelsDataSourceConfigApplication() string { return ` data "aws_emr_release_labels" "test" { filters { - application = "spark@2.1.0" + application = "Spark@3.1.2" } } ` @@ -114,8 +114,8 @@ func testAccReleaseLabelsDataSourceConfigFull() string { return ` data "aws_emr_release_labels" "test" { filters { - application = "spark@2.1.0" - prefix = "emr-5" + application = "Spark@3.1.2" + prefix = "emr-6" } } ` diff --git a/website/docs/d/emr_release_labels.markdown b/website/docs/d/emr_release_labels.markdown index 95571abbb7a4..672b543b19d2 100644 --- a/website/docs/d/emr_release_labels.markdown +++ b/website/docs/d/emr_release_labels.markdown @@ -1,5 +1,5 @@ --- -subcategory: "EMR" +subcategory: "Elastic Map Reduce (EMR)" layout: "aws" page_title: "AWS: aws_emr_release_labels" description: |- @@ -27,7 +27,7 @@ data "aws_emr_release_labels" "example" { ### Filters -* `application` - (Optional) Optional release label application filter. For example, `spark@2.1.0`. +* `application` - (Optional) Optional release label application filter. For example, `Spark@2.1.0` or `Spark`. * `prefix` - (Optional) Optional release label version prefix filter. For example, `emr-5`. 
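+
+A filter may also name an application without a version, as in the example
+above; the following sketch is illustrative only:
+
+```terraform
+data "aws_emr_release_labels" "spark" {
+  filters {
+    application = "Spark"
+  }
+}
+```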
## Attributes Reference From 15326d2dbef5a2307a74e73a63f933138e51e125 Mon Sep 17 00:00:00 2001 From: tmarlok Date: Sun, 14 Nov 2021 17:51:08 +0100 Subject: [PATCH 162/304] Add tags support to IoT Thing Types --- internal/service/iot/thing_type.go | 38 +++++++- internal/service/iot/thing_type_test.go | 112 +++++++++++++++++++++++- 2 files changed, 148 insertions(+), 2 deletions(-) diff --git a/internal/service/iot/thing_type.go b/internal/service/iot/thing_type.go index 8bd57a0e48db..0901d5184c2e 100644 --- a/internal/service/iot/thing_type.go +++ b/internal/service/iot/thing_type.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -69,17 +70,22 @@ func ResourceThingType() *schema.Resource { Optional: true, Default: false, }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), "arn": { Type: schema.TypeString, Computed: true, }, }, + + CustomizeDiff: verify.SetTagsDiff, } } func resourceThingTypeCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) params := &iot.CreateThingTypeInput{ ThingTypeName: aws.String(d.Get("name").(string)), } @@ -92,6 +98,9 @@ func resourceThingTypeCreate(d *schema.ResourceData, meta interface{}) error { params.ThingTypeProperties = expandThingTypeProperties(config) } } + if len(tags) > 0 { + params.Tags = Tags(tags.IgnoreAWS()) + } log.Printf("[DEBUG] Creating IoT Thing Type: %s", params) out, err := conn.CreateThingType(params) @@ -122,6 +131,9 @@ func resourceThingTypeCreate(d *schema.ResourceData, meta interface{}) error { func resourceThingTypeRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + params := &iot.DescribeThingTypeInput{ ThingTypeName: aws.String(d.Id()), } @@ -142,6 +154,22 @@ func resourceThingTypeRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", out.ThingTypeArn) + tags, err := ListTags(conn, aws.StringValue(out.ThingTypeArn)) + if err != nil { + return fmt.Errorf("error listing tags for IoT Thing Type (%s): %w", aws.StringValue(out.ThingTypeArn), err) + } + + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return fmt.Errorf("error setting tags_all: %w", err) + } + if err := d.Set("properties", flattenIoTThingTypeProperties(out.ThingTypeProperties)); err != nil { return fmt.Errorf("error setting properties: %s", err) } @@ -166,6 +194,14 @@ func resourceThingTypeUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return 
resourceThingTypeRead(d, meta) } diff --git a/internal/service/iot/thing_type_test.go b/internal/service/iot/thing_type_test.go index 066126eb278d..033259a8d479 100644 --- a/internal/service/iot/thing_type_test.go +++ b/internal/service/iot/thing_type_test.go @@ -25,8 +25,11 @@ func TestAccIoTThingType_basic(t *testing.T) { { Config: testAccThingTypeConfig_basic(rInt), Check: resource.ComposeTestCheckFunc( + testAccCheckThingTypeExists("aws_iot_thing_type.foo"), resource.TestCheckResourceAttrSet("aws_iot_thing_type.foo", "arn"), resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "name", fmt.Sprintf("tf_acc_iot_thing_type_%d", rInt)), + resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "tags.%", "0"), + resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "tags_all.%", "0"), ), }, { @@ -50,10 +53,13 @@ func TestAccIoTThingType_full(t *testing.T) { { Config: testAccThingTypeConfig_full(rInt), Check: resource.ComposeTestCheckFunc( + testAccCheckThingTypeExists("aws_iot_thing_type.foo"), resource.TestCheckResourceAttrSet("aws_iot_thing_type.foo", "arn"), resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "properties.0.description", "MyDescription"), resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "properties.0.searchable_attributes.#", "3"), resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "deprecated", "true"), + resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_iot_thing_type.foo", "tags_all.%", "1"), ), }, { @@ -71,6 +77,76 @@ func TestAccIoTThingType_full(t *testing.T) { }) } +func TestAccIoTThingType_tags(t *testing.T) { + rName := sdkacctest.RandString(5) + resourceName := "aws_iot_thing_type.foo" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingTypeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIoTThingTypeTags1(rName, "key1", "user@example"), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingTypeExists("aws_iot_thing_type.foo"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "user@example"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIoTThingTypeTags2(rName, "key1", "user@example", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingTypeExists("aws_iot_thing_type.foo"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "user@example"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccIoTThingTypeTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingTypeExists("aws_iot_thing_type.foo"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckThingTypeExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn + input := &iot.ListThingTypesInput{} + + output, err := conn.ListThingTypes(input) + + if err != nil { + return err + } + + for _, rule := range 
output.ThingTypes {
+			if aws.StringValue(rule.ThingTypeName) == rs.Primary.ID {
+				return nil
+			}
+		}
+
+		return fmt.Errorf("IoT Thing Type (%s) not found", rs.Primary.ID)
+	}
+}
+
 func testAccCheckThingTypeDestroy(s *terraform.State) error {
 	conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn

@@ -87,7 +163,6 @@ func testAccCheckThingTypeDestroy(s *terraform.State) error {
 		if err == nil {
 			return fmt.Errorf("Expected IoT Thing Type to be destroyed, %s found", rs.Primary.ID)
 		}
-
 	}

 	return nil
@@ -111,6 +186,10 @@ resource "aws_iot_thing_type" "foo" {
     description           = "MyDescription"
     searchable_attributes = ["foo", "bar", "baz"]
   }
+
+  tags = {
+    testtag = "MyTagValue"
+  }
 }
 `, rName)
 }
@@ -125,6 +204,37 @@ resource "aws_iot_thing_type" "foo" {
     description           = "MyDescription"
     searchable_attributes = ["foo", "bar", "baz"]
   }
+
+  tags = {
+    testtag = "MyTagValue"
+  }
 }
 `, rName)
 }
+
+func testAccIoTThingTypeTags1(rName, tagKey1, tagValue1 string) string {
+	return fmt.Sprintf(`
+resource "aws_iot_thing_type" "foo" {
+  name       = "tf_acc_iot_thing_type_%[1]s"
+  deprecated = false
+
+  tags = {
+    %[2]q = %[3]q
+  }
+}
+`, rName, tagKey1, tagValue1)
+}
+
+func testAccIoTThingTypeTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string {
+	return fmt.Sprintf(`
+resource "aws_iot_thing_type" "foo" {
+  name       = "tf_acc_iot_thing_type_%[1]s"
+  deprecated = false
+
+  tags = {
+    %[2]q = %[3]q
+    %[4]q = %[5]q
+  }
+}
+`, rName, tagKey1, tagValue1, tagKey2, tagValue2)
+}
From 4ebcc482812247a8241fd6d336ca84da855d5645 Mon Sep 17 00:00:00 2001
From: tmarlok
Date: Sun, 14 Nov 2021 18:03:18 +0100
Subject: [PATCH 163/304] Add tags support to IoT Thing Types - docs

---
 website/docs/r/iot_thing_type.html.markdown | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/website/docs/r/iot_thing_type.html.markdown b/website/docs/r/iot_thing_type.html.markdown
index f17ad9c4da6e..84f1c4bbe1f7 100644
--- a/website/docs/r/iot_thing_type.html.markdown
+++ b/website/docs/r/iot_thing_type.html.markdown
@@ -25,6 +25,7 @@ resource "aws_iot_thing_type" "foo" {
 * `properties` - (Optional), Configuration block that can contain the following properties of the thing type:
   * `description` - (Optional, Forces New Resource) The description of the thing type.
   * `searchable_attributes` - (Optional, Forces New Resource) A list of searchable thing attribute names.
+* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.

 ## Attributes Reference

 In addition to all arguments above, the following attributes are exported:

 * `arn` - The ARN of the created AWS IoT Thing Type.
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
## Import From 948e4dd64e907e669d63afd31e12a96627d5730b Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Mon, 10 May 2021 14:55:05 +0200 Subject: [PATCH 164/304] first work --- resource_aws_s3_object_lambda_access_point.go | 329 ++++++++++++++++++ ..._aws_s3_object_lambda_access_point_test.go | 14 + 2 files changed, 343 insertions(+) create mode 100644 resource_aws_s3_object_lambda_access_point.go create mode 100644 resource_aws_s3_object_lambda_access_point_test.go diff --git a/resource_aws_s3_object_lambda_access_point.go b/resource_aws_s3_object_lambda_access_point.go new file mode 100644 index 000000000000..867bd5a393fc --- /dev/null +++ b/resource_aws_s3_object_lambda_access_point.go @@ -0,0 +1,329 @@ +// https://pkg.go.dev/github.com/aws/aws-sdk-go@v1.38.31/service/s3control?utm_source=gopls +// https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_access_point +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3ObjectLambdaAccessPointCreate, + Read: resourceAwsS3ObjectLambdaAccessPointRead, + Update: resourceAwsS3ObjectLambdaAccessPointUpdate, + Delete: resourceAwsS3ObjectLambdaAccessPointDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + }, + + "Allowed_features": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "cloud_watch_metrics_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + + "supporting_access_point": { + Type: schema.TypeString, + Optional: false, + }, + + "transformation_configurations": { + Type: schema.TypeList, + Optional: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeString, + Optional: false, + }, + "Content_transformation": { + Type: schema.schema.TypeList, + Optional: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_lambda": { + Type: schema.TypeString, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "function_arn": { + Type: schema.TypeString, + Optional: false, + }, + "function_payload": { + Type: schema.schema.TypeString, + Optional: true, + + }, + }, + }, + + }, + }, + }, + + }, + }, + }, + }, + }, +} + +func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3controlconn + + accountId := meta.(*AWSClient).accountid + if v, ok := d.GetOk("account_id"); ok { + accountId = v.(string) + } + name := d.Get("name").(string) + + configuration := &s3control.ObjectLambdaConfiguration{ + AllowedFeatures []*string `locationNameList:"AllowedFeature" type:"list"` + CloudWatchMetricsEnabled *bool `type:"boolean"` + SupportingAccessPoint *string `min:"1" type:"string" required:"true"` + TransformationConfigurations []*ObjectLambdaTransformationConfiguration `locationNameList:"TransformationConfiguration" type:"list" required:"true"` + } + + input := &s3control.CreateAccessPointForObjectLambdaInput{ + AccountId: aws.String(accountId), + Configuration: configuration, + Name: aws.String(name), + } + + /* + log.Printf("[DEBUG] Creating S3 
Access Point: %s", input) + output, err := conn.CreateAccessPoint(input) + + if err != nil { + return fmt.Errorf("error creating S3 Control Access Point (%s): %w", name, err) + } + + if output == nil { + return fmt.Errorf("error creating S3 Control Access Point (%s): empty response", name) + } + + parsedARN, err := arn.Parse(aws.StringValue(output.AccessPointArn)) + + if err == nil && strings.HasPrefix(parsedARN.Resource, "outpost/") { + d.SetId(aws.StringValue(output.AccessPointArn)) + name = aws.StringValue(output.AccessPointArn) + } else { + d.SetId(fmt.Sprintf("%s:%s", accountId, name)) + } + + if v, ok := d.GetOk("policy"); ok { + log.Printf("[DEBUG] Putting S3 Access Point policy: %s", d.Id()) + _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + Policy: aws.String(v.(string)), + }) + + if err != nil { + return fmt.Errorf("error putting S3 Access Point (%s) policy: %s", d.Id(), err) + } + } + + return resourceAwsS3AccessPointRead(d, meta) + */ +} + +func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3controlconn + + accountId, name, err := s3AccessPointParseId(d.Id()) + if err != nil { + return err + } + + /* + output, err := conn.GetAccessPoint(&s3control.GetAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchAccessPoint) { + log.Printf("[WARN] S3 Access Point (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Access Point (%s): %w", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error reading S3 Access Point (%s): empty response", d.Id()) + } + + if strings.HasPrefix(name, "arn:") { + parsedAccessPointARN, err := arn.Parse(name) + + if err != nil { + return fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err) + } + + bucketARN := arn.ARN{ + AccountID: parsedAccessPointARN.AccountID, + Partition: parsedAccessPointARN.Partition, + Region: parsedAccessPointARN.Region, + Resource: strings.Replace( + parsedAccessPointARN.Resource, + fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), + fmt.Sprintf("bucket/%s", aws.StringValue(output.Bucket)), + 1, + ), + Service: parsedAccessPointARN.Service, + } + + d.Set("arn", name) + d.Set("bucket", bucketARN.String()) + } else { + accessPointARN := arn.ARN{ + AccountID: accountId, + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), + Service: "s3", + } + + d.Set("arn", accessPointARN.String()) + d.Set("bucket", output.Bucket) + } + + d.Set("account_id", accountId) + d.Set("domain_name", meta.(*AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) + d.Set("name", output.Name) + d.Set("network_origin", output.NetworkOrigin) + if err := d.Set("public_access_block_configuration", flattenS3AccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { + return fmt.Errorf("error setting public_access_block_configuration: %s", err) + } + if err := d.Set("vpc_configuration", flattenS3AccessPointVpcConfiguration(output.VpcConfiguration)); err != nil { + return fmt.Errorf("error setting vpc_configuration: %s", err) + } + + policyOutput, err := 
conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if isAWSErr(err, "NoSuchAccessPointPolicy", "") { + d.Set("policy", "") + } else { + if err != nil { + return fmt.Errorf("error reading S3 Access Point (%s) policy: %s", d.Id(), err) + } + + d.Set("policy", policyOutput.Policy) + } + + // Return early since S3 on Outposts cannot have public policies + if strings.HasPrefix(name, "arn:") { + d.Set("has_public_access_policy", false) + + return nil + } + + policyStatusOutput, err := conn.GetAccessPointPolicyStatus(&s3control.GetAccessPointPolicyStatusInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if isAWSErr(err, "NoSuchAccessPointPolicy", "") { + d.Set("has_public_access_policy", false) + } else { + if err != nil { + return fmt.Errorf("error reading S3 Access Point (%s) policy status: %s", d.Id(), err) + } + + d.Set("has_public_access_policy", policyStatusOutput.PolicyStatus.IsPublic) + } + */ + + return nil +} + +func resourceAwsS3ObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3controlconn + + accountId, name, err := s3AccessPointParseId(d.Id()) + if err != nil { + return err + } + + /* + if d.HasChange("policy") { + if v, ok := d.GetOk("policy"); ok { + log.Printf("[DEBUG] Putting S3 Access Point policy: %s", d.Id()) + _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + Policy: aws.String(v.(string)), + }) + + if err != nil { + return fmt.Errorf("error putting S3 Access Point (%s) policy: %s", d.Id(), err) + } + } else { + log.Printf("[DEBUG] Deleting S3 Access Point policy: %s", d.Id()) + _, err := conn.DeleteAccessPointPolicy(&s3control.DeleteAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if err != nil { + return fmt.Errorf("error deleting S3 Access Point (%s) policy: %s", d.Id(), err) + } + } + } + */ + + return resourceAwsS3AccessPointRead(d, meta) +} + +func resourceAwsS3ObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3controlconn + + accountId, name, err := s3AccessPointParseId(d.Id()) + if err != nil { + return err + } + + /* + log.Printf("[DEBUG] Deleting S3 Access Point: %s", d.Id()) + _, err = conn.DeleteAccessPoint(&s3control.DeleteAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if isAWSErr(err, "NoSuchAccessPoint", "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting S3 Access Point (%s): %s", d.Id(), err) + } + */ + + return nil +} diff --git a/resource_aws_s3_object_lambda_access_point_test.go b/resource_aws_s3_object_lambda_access_point_test.go new file mode 100644 index 000000000000..e6c8421ccc65 --- /dev/null +++ b/resource_aws_s3_object_lambda_access_point_test.go @@ -0,0 +1,14 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + awspolicy "github.com/jen20/awspolicyequivalence" +) From eede8dd84f3a7c6e392e3a92f2eddcfe37a5389f Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Wed, 12 May 2021 13:41:03 +0200 Subject: [PATCH 165/304] progress --- 
resource_aws_s3_object_lambda_access_point.go | 415 ++++++----- ..._aws_s3_object_lambda_access_point_test.go | 697 ++++++++++++++++++ 2 files changed, 938 insertions(+), 174 deletions(-) diff --git a/resource_aws_s3_object_lambda_access_point.go b/resource_aws_s3_object_lambda_access_point.go index 867bd5a393fc..c6972b8f72c4 100644 --- a/resource_aws_s3_object_lambda_access_point.go +++ b/resource_aws_s3_object_lambda_access_point.go @@ -3,9 +3,16 @@ package aws import ( + "fmt" + "log" + "strings" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tfs3control "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3control" ) func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { @@ -27,28 +34,27 @@ func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { ForceNew: true, ValidateFunc: validateAwsAccountId, }, - - "Allowed_features": { - Type: schema.TypeList, + + "allowed_features": { + Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, + Elem: &schema.Schema{Type: schema.TypeString}, + MaxItems: 2, }, "cloud_watch_metrics_enabled": { - Type: schema.TypeBool, + Type: schema.TypeBool, Optional: true, }, "supporting_access_point": { - Type: schema.TypeString, - Optional: false, + Type: schema.TypeString, + Optional: false, }, "transformation_configurations": { - Type: schema.TypeList, - Optional: false, + Type: schema.TypeList, + Optional: false, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "actions": { @@ -56,7 +62,7 @@ func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { Optional: false, }, "Content_transformation": { - Type: schema.schema.TypeList, + Type: schema.TypeList, Optional: false, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -70,22 +76,24 @@ func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { Optional: false, }, "function_payload": { - Type: schema.schema.TypeString, + Type: schema.TypeString, Optional: true, - + }, }, }, }, - }, }, }, - }, }, }, }, - }, + } +} + +func expandObjectLambdaTransformationConfiguration(tConfig []interface{}) *s3control.ObjectLambdaTransformationConfiguration { + return &s3control.ObjectLambdaTransformationConfiguration{} } func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta interface{}) error { @@ -98,10 +106,10 @@ func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta int name := d.Get("name").(string) configuration := &s3control.ObjectLambdaConfiguration{ - AllowedFeatures []*string `locationNameList:"AllowedFeature" type:"list"` - CloudWatchMetricsEnabled *bool `type:"boolean"` - SupportingAccessPoint *string `min:"1" type:"string" required:"true"` - TransformationConfigurations []*ObjectLambdaTransformationConfiguration `locationNameList:"TransformationConfiguration" type:"list" required:"true"` + AllowedFeatures: expandStringSet(d.Get("allowed_features").(*schema.Set)), + CloudWatchMetricsEnabled: aws.Bool(d.Get("cloud_watch_metrics_enabled").(bool)), + SupportingAccessPoint: aws.String(d.Get("supporting_access_point").(string)), + TransformationConfigurations: expandObjectLambdaTransformationConfiguration(d.Get("transformation_configurations").([]interface{})), } input := &s3control.CreateAccessPointForObjectLambdaInput{ @@ -110,155 +118,151 @@ func resourceAwsS3ObjectLambdaAccessPointCreate(d 
*schema.ResourceData, meta int Name: aws.String(name), } - /* - log.Printf("[DEBUG] Creating S3 Access Point: %s", input) - output, err := conn.CreateAccessPoint(input) + log.Printf("[DEBUG] Creating S3 Object Lambda Access Point: %s", input) + output, err := conn.CreateAccessPointForObjectLambda()(input) - if err != nil { - return fmt.Errorf("error creating S3 Control Access Point (%s): %w", name, err) - } + if err != nil { + return fmt.Errorf("error creating S3 Control Access Point (%s): %w", name, err) + } - if output == nil { - return fmt.Errorf("error creating S3 Control Access Point (%s): empty response", name) - } + if output == nil { + return fmt.Errorf("error creating S3 Control Access Point (%s): empty response", name) + } - parsedARN, err := arn.Parse(aws.StringValue(output.AccessPointArn)) + parsedARN, err := arn.Parse(aws.StringValue(output.AccessPointArn)) - if err == nil && strings.HasPrefix(parsedARN.Resource, "outpost/") { - d.SetId(aws.StringValue(output.AccessPointArn)) - name = aws.StringValue(output.AccessPointArn) - } else { - d.SetId(fmt.Sprintf("%s:%s", accountId, name)) - } + if err == nil && strings.HasPrefix(parsedARN.Resource, "outpost/") { + d.SetId(aws.StringValue(output.AccessPointArn)) + name = aws.StringValue(output.AccessPointArn) + } else { + d.SetId(fmt.Sprintf("%s:%s", accountId, name)) + } - if v, ok := d.GetOk("policy"); ok { - log.Printf("[DEBUG] Putting S3 Access Point policy: %s", d.Id()) - _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - Policy: aws.String(v.(string)), - }) + if v, ok := d.GetOk("policy"); ok { + log.Printf("[DEBUG] Putting S3 Object Lambda Access Point policy: %s", d.Id()) + _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + Policy: aws.String(v.(string)), + }) - if err != nil { - return fmt.Errorf("error putting S3 Access Point (%s) policy: %s", d.Id(), err) - } + if err != nil { + return fmt.Errorf("error putting S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) } + } - return resourceAwsS3AccessPointRead(d, meta) - */ + return resourceAwsS3ObjectLambdaAccessPointRead(d, meta) } func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).s3controlconn - accountId, name, err := s3AccessPointParseId(d.Id()) + accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) if err != nil { return err } - /* - output, err := conn.GetAccessPoint(&s3control.GetAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchAccessPoint) { - log.Printf("[WARN] S3 Access Point (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - - if err != nil { - return fmt.Errorf("error reading S3 Access Point (%s): %w", d.Id(), err) - } - - if output == nil { - return fmt.Errorf("error reading S3 Access Point (%s): empty response", d.Id()) - } + output, err := conn.GetAccessPoint(&s3control.GetAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) - if strings.HasPrefix(name, "arn:") { - parsedAccessPointARN, err := arn.Parse(name) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchAccessPoint) { + log.Printf("[WARN] S3 Object Lambda Access Point (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + 
} - if err != nil { - return fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err) - } + if err != nil { + return fmt.Errorf("error reading S3 Object Lambda Access Point (%s): %w", d.Id(), err) + } - bucketARN := arn.ARN{ - AccountID: parsedAccessPointARN.AccountID, - Partition: parsedAccessPointARN.Partition, - Region: parsedAccessPointARN.Region, - Resource: strings.Replace( - parsedAccessPointARN.Resource, - fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), - fmt.Sprintf("bucket/%s", aws.StringValue(output.Bucket)), - 1, - ), - Service: parsedAccessPointARN.Service, - } + if output == nil { + return fmt.Errorf("error reading S3 Object Lambda Access Point (%s): empty response", d.Id()) + } - d.Set("arn", name) - d.Set("bucket", bucketARN.String()) - } else { - accessPointARN := arn.ARN{ - AccountID: accountId, - Partition: meta.(*AWSClient).partition, - Region: meta.(*AWSClient).region, - Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), - Service: "s3", - } + if strings.HasPrefix(name, "arn:") { + parsedAccessPointARN, err := arn.Parse(name) - d.Set("arn", accessPointARN.String()) - d.Set("bucket", output.Bucket) + if err != nil { + return fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err) } - d.Set("account_id", accountId) - d.Set("domain_name", meta.(*AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) - d.Set("name", output.Name) - d.Set("network_origin", output.NetworkOrigin) - if err := d.Set("public_access_block_configuration", flattenS3AccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { - return fmt.Errorf("error setting public_access_block_configuration: %s", err) + bucketARN := arn.ARN{ + AccountID: parsedAccessPointARN.AccountID, + Partition: parsedAccessPointARN.Partition, + Region: parsedAccessPointARN.Region, + Resource: strings.Replace( + parsedAccessPointARN.Resource, + fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), + fmt.Sprintf("bucket/%s", aws.StringValue(output.Bucket)), + 1, + ), + Service: parsedAccessPointARN.Service, } - if err := d.Set("vpc_configuration", flattenS3AccessPointVpcConfiguration(output.VpcConfiguration)); err != nil { - return fmt.Errorf("error setting vpc_configuration: %s", err) + + d.Set("arn", name) + d.Set("bucket", bucketARN.String()) + } else { + accessPointARN := arn.ARN{ + AccountID: accountId, + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), + Service: "s3", } - policyOutput, err := conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + d.Set("arn", accessPointARN.String()) + d.Set("bucket", output.Bucket) + } - if isAWSErr(err, "NoSuchAccessPointPolicy", "") { - d.Set("policy", "") - } else { - if err != nil { - return fmt.Errorf("error reading S3 Access Point (%s) policy: %s", d.Id(), err) - } + d.Set("account_id", accountId) + d.Set("domain_name", meta.(*AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) + d.Set("name", output.Name) + d.Set("network_origin", output.NetworkOrigin) + if err := d.Set("public_access_block_configuration", flattenS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { + return fmt.Errorf("error setting public_access_block_configuration: 
%s", err) + } + if err := d.Set("vpc_configuration", flattenS3ObjectLambdaAccessPointVpcConfiguration(output.VpcConfiguration)); err != nil { + return fmt.Errorf("error setting vpc_configuration: %s", err) + } + + policyOutput, err := conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) - d.Set("policy", policyOutput.Policy) + if isAWSErr(err, "NoSuchAccessPointPolicy", "") { + d.Set("policy", "") + } else { + if err != nil { + return fmt.Errorf("error reading S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) } - // Return early since S3 on Outposts cannot have public policies - if strings.HasPrefix(name, "arn:") { - d.Set("has_public_access_policy", false) + d.Set("policy", policyOutput.Policy) + } - return nil - } + // Return early since S3 on Outposts cannot have public policies + if strings.HasPrefix(name, "arn:") { + d.Set("has_public_access_policy", false) - policyStatusOutput, err := conn.GetAccessPointPolicyStatus(&s3control.GetAccessPointPolicyStatusInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + return nil + } - if isAWSErr(err, "NoSuchAccessPointPolicy", "") { - d.Set("has_public_access_policy", false) - } else { - if err != nil { - return fmt.Errorf("error reading S3 Access Point (%s) policy status: %s", d.Id(), err) - } + policyStatusOutput, err := conn.GetAccessPointPolicyStatus(&s3control.GetAccessPointPolicyStatusInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) - d.Set("has_public_access_policy", policyStatusOutput.PolicyStatus.IsPublic) + if isAWSErr(err, "NoSuchAccessPointPolicy", "") { + d.Set("has_public_access_policy", false) + } else { + if err != nil { + return fmt.Errorf("error reading S3 Object Lambda Access Point (%s) policy status: %s", d.Id(), err) } - */ + + d.Set("has_public_access_policy", policyStatusOutput.PolicyStatus.IsPublic) + } return nil } @@ -266,64 +270,127 @@ func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta inter func resourceAwsS3ObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).s3controlconn - accountId, name, err := s3AccessPointParseId(d.Id()) + accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) if err != nil { return err } - /* - if d.HasChange("policy") { - if v, ok := d.GetOk("policy"); ok { - log.Printf("[DEBUG] Putting S3 Access Point policy: %s", d.Id()) - _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - Policy: aws.String(v.(string)), - }) - - if err != nil { - return fmt.Errorf("error putting S3 Access Point (%s) policy: %s", d.Id(), err) - } - } else { - log.Printf("[DEBUG] Deleting S3 Access Point policy: %s", d.Id()) - _, err := conn.DeleteAccessPointPolicy(&s3control.DeleteAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - - if err != nil { - return fmt.Errorf("error deleting S3 Access Point (%s) policy: %s", d.Id(), err) - } + if d.HasChange("policy") { + if v, ok := d.GetOk("policy"); ok { + log.Printf("[DEBUG] Putting S3 Object Lambda Access Point policy: %s", d.Id()) + _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + Policy: aws.String(v.(string)), + }) + + if err != nil { + return fmt.Errorf("error putting S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) + } + } else 
{ + log.Printf("[DEBUG] Deleting S3 Object Lambda Access Point policy: %s", d.Id()) + _, err := conn.DeleteAccessPointForObjectLambdaPolicy(&s3control.DeleteAccessPointForObjectLambdaPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + + if err != nil { + return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) } } - */ + } - return resourceAwsS3AccessPointRead(d, meta) + return resourceAwsS3ObjectLambdaAccessPointRead(d, meta) } func resourceAwsS3ObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).s3controlconn - accountId, name, err := s3AccessPointParseId(d.Id()) + accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) if err != nil { return err } - /* - log.Printf("[DEBUG] Deleting S3 Access Point: %s", d.Id()) - _, err = conn.DeleteAccessPoint(&s3control.DeleteAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + log.Printf("[DEBUG] Deleting S3 Object Lambda Access Point: %s", d.Id()) + _, err = conn.DeleteAccessPointForObjectLambda(&s3control.DeleteAccessPointForObjectLambdaInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) - if isAWSErr(err, "NoSuchAccessPoint", "") { - return nil - } + if isAWSErr(err, "NoSuchAccessPoint", "") { + return nil + } - if err != nil { - return fmt.Errorf("error deleting S3 Access Point (%s): %s", d.Id(), err) - } - */ + if err != nil { + return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %s", d.Id(), err) + } return nil } + +// S3ObjectLambdaAccessPointParseId returns the Account ID and Access Point Name (S3) or ARN (S3 on Outposts) +func S3ObjectLambdaAccessPointParseId(id string) (string, string, error) { + parsedARN, err := arn.Parse(id) + + if err == nil { + return parsedARN.AccountID, id, nil + } + + parts := strings.SplitN(id, ":", 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format of ID (%s), expected ACCOUNT_ID:NAME", id) + } + + return parts[0], parts[1], nil +} + +func expandS3ObjectLambdaAccessPointVpcConfiguration(vConfig []interface{}) *s3control.VpcConfiguration { + if len(vConfig) == 0 || vConfig[0] == nil { + return nil + } + + mConfig := vConfig[0].(map[string]interface{}) + + return &s3control.VpcConfiguration{ + VpcId: aws.String(mConfig["vpc_id"].(string)), + } +} + +func flattenS3ObjectLambdaAccessPointVpcConfiguration(config *s3control.VpcConfiguration) []interface{} { + if config == nil { + return []interface{}{} + } + + return []interface{}{map[string]interface{}{ + "vpc_id": aws.StringValue(config.VpcId), + }} +} + +func expandS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(vConfig []interface{}) *s3control.PublicAccessBlockConfiguration { + if len(vConfig) == 0 || vConfig[0] == nil { + return nil + } + + mConfig := vConfig[0].(map[string]interface{}) + + return &s3control.PublicAccessBlockConfiguration{ + BlockPublicAcls: aws.Bool(mConfig["block_public_acls"].(bool)), + BlockPublicPolicy: aws.Bool(mConfig["block_public_policy"].(bool)), + IgnorePublicAcls: aws.Bool(mConfig["ignore_public_acls"].(bool)), + RestrictPublicBuckets: aws.Bool(mConfig["restrict_public_buckets"].(bool)), + } +} + +func flattenS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(config *s3control.PublicAccessBlockConfiguration) []interface{} { + if config == nil { + return []interface{}{} + } + + return []interface{}{map[string]interface{}{ + "block_public_acls": 
aws.BoolValue(config.BlockPublicAcls), + "block_public_policy": aws.BoolValue(config.BlockPublicPolicy), + "ignore_public_acls": aws.BoolValue(config.IgnorePublicAcls), + "restrict_public_buckets": aws.BoolValue(config.RestrictPublicBuckets), + }} +} diff --git a/resource_aws_s3_object_lambda_access_point_test.go b/resource_aws_s3_object_lambda_access_point_test.go index e6c8421ccc65..831f09c13d7a 100644 --- a/resource_aws_s3_object_lambda_access_point_test.go +++ b/resource_aws_s3_object_lambda_access_point_test.go @@ -3,12 +3,709 @@ package aws import ( "fmt" "log" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" awspolicy "github.com/jen20/awspolicyequivalence" ) + +func init() { + resource.AddTestSweepers("aws_s3_object_lambda_access_point", &resource.Sweeper{ + Name: "aws_s3_object_lambda_access_point", + F: testSweepS3ObjectLambdaAccessPoints, + }) +} + +func testSweepS3ObjectLambdaAccessPoints(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + + accountId := client.(*AWSClient).accountid + conn := client.(*AWSClient).s3controlconn + + input := &s3control.ListAccessPointsForObjectLambdaInput{ + AccountId: aws.String(accountId), + } + var sweeperErrs *multierror.Error + + conn.ListAccessPointsForObjectLambdaPages(input, func(page *s3control.ListAccessPointsForObjectLambdaOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, ObjectLambdaAccessPoint := range page.ObjectLambdaAccessPointList { + input := &s3control.DeleteAccessPointForObjectLambdaInput{ + AccountId: aws.String(accountId), + Name: ObjectLambdaAccessPoint.Name, + } + name := aws.StringValue(ObjectLambdaAccessPoint.Name) + + log.Printf("[INFO] Deleting S3 Object Lambda Access Point: %s", name) + _, err := conn.DeleteAccessPointForObjectLambda(input) + + if isAWSErr(err, "NoSuchAccessPoint", "") { + continue + } + + if err != nil { + sweeperErr := fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %w", name, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping S3 Object Lambda Access Point sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing S3 Object Lambda Access Points: %w", err) + } + + return sweeperErrs.ErrorOrNil() +} + +func TestAccAWSS3ObjectLambdaAccessPoint_basic(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + bucketName := acctest.RandomWithPrefix("tf-acc-test") + accessPointName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + 
testAccCheckResourceAttrAccountID(resourceName, "account_id"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", accessPointName)), + resource.TestCheckResourceAttr(resourceName, "bucket", bucketName), + testAccMatchResourceAttrRegionalHostname(resourceName, "domain_name", "s3-accesspoint", regexp.MustCompile(fmt.Sprintf("^%s-\\d{12}", accessPointName))), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "name", accessPointName), + resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), + resource.TestCheckResourceAttr(resourceName, "policy", ""), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "true"), + resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSS3ObjectLambdaAccessPoint_disappears(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + bucketName := acctest.RandomWithPrefix("tf-acc-test") + accessPointName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckAWSS3ObjectLambdaAccessPointDisappears(resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSS3ObjectLambdaAccessPoint_disappears_Bucket(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + bucketName := acctest.RandomWithPrefix("tf-acc-test") + accessPointName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckAWSS3DestroyBucket(bucketResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSS3ObjectLambdaAccessPoint_Bucket_Arn(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + + resource.ParallelTest(t, resource.TestCase{ + 
PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSOutpostsOutposts(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_Bucket_Arn(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckResourceAttrAccountID(resourceName, "account_id"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3-outposts", fmt.Sprintf("outpost/[^/]+/accesspoint/%s", rName)), + resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3control_bucket.test", "arn"), + testAccMatchResourceAttrRegionalHostname(resourceName, "domain_name", "s3-accesspoint", regexp.MustCompile(fmt.Sprintf("^%s-\\d{12}", rName))), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "network_origin", "Vpc"), + resource.TestCheckResourceAttr(resourceName, "policy", ""), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "true"), + resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_configuration.0.vpc_id", "aws_vpc.test", "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSS3ObjectLambdaAccessPoint_Policy(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + + expectedPolicyText1 := func() string { + return fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": "s3:GetObjectTagging", + "Resource": [ + "arn:%s:s3:%s:%s:accesspoint/%s/object/*" + ] + } + ] +}`, testAccGetPartition(), testAccGetRegion(), testAccGetAccountID(), rName) + } + expectedPolicyText2 := func() string { + return fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:GetObjectLegalHold", + "s3:GetObjectRetention" + ], + "Resource": [ + "arn:%s:s3:%s:%s:accesspoint/%s/object/*" + ] + } + ] +}`, testAccGetPartition(), testAccGetRegion(), testAccGetAccountID(), rName) + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_policy(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(resourceName, 
expectedPolicyText1), + testAccCheckResourceAttrAccountID(resourceName, "account_id"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "true"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "false"), + resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_policyUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(resourceName, expectedPolicyText2), + ), + }, + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_noPolicy(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "policy", ""), + ), + }, + }, + }) +} + +func TestAccAWSS3ObjectLambdaAccessPoint_PublicAccessBlockConfiguration(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_publicAccessBlock(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckResourceAttrAccountID(resourceName, "account_id"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), + resource.TestCheckResourceAttr(resourceName, "policy", ""), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "false"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "false"), + 
resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "false"), + resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSS3ObjectLambdaAccessPoint_VpcConfiguration(t *testing.T) { + var v s3control.GetAccessPointForObjectLambdaOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_object_lambda_access_point.test" + vpcResourceName := "aws_vpc.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3ObjectLambdaAccessPointConfig_vpc(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckResourceAttrAccountID(resourceName, "account_id"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "network_origin", "VPC"), + resource.TestCheckResourceAttr(resourceName, "policy", ""), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), + resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "true"), + resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_configuration.0.vpc_id", vpcResourceName, "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAWSS3ObjectLambdaAccessPointDisappears(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No S3 Access Point ID is set") + } + + accountId, name, err := s3AccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + conn := testAccProvider.Meta().(*AWSClient).s3controlconn + + _, err = conn.DeleteAccessPoint(&s3control.DeleteAccessPointInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + if err != nil { + return err + } + + return nil + } +} + +func testAccCheckAWSS3ObjectLambdaAccessPointDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).s3controlconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_object_lambda_access_point" { + continue + } + + accountId, name, err := s3AccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + _, err = conn.GetAccessPointForObjectLambda(&s3control.GetAccessPointForObjectLambdaInput{ + AccountId: 
aws.String(accountId), + Name: aws.String(name), + }) + if err == nil { + return fmt.Errorf("S3 Access Point still exists") + } + } + return nil +} + +func testAccCheckAWSS3ObjectLambdaAccessPointExists(n string, output *s3control.GetAccessPointForObjectLambdaOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No S3 Access Point ID is set") + } + + accountId, name, err := s3AccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + conn := testAccProvider.Meta().(*AWSClient).s3controlconn + + resp, err := conn.GetAccessPointForObjectLambda(&s3control.GetAccessPointForObjectLambdaInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + if err != nil { + return err + } + + *output = *resp + + return nil + } +} + +func testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(n string, fn func() string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No S3 Access Point ID is set") + } + + accountId, name, err := s3AccessPointParseId(rs.Primary.ID) + if err != nil { + return err + } + + conn := testAccProvider.Meta().(*AWSClient).s3controlconn + + resp, err := conn.GetAccessPointPolicyForObjectLambda(&s3control.GetAccessPointForObjectLambdaPolicyInput{ + AccountId: aws.String(accountId), + Name: aws.String(name), + }) + if err != nil { + return err + } + + actualPolicyText := *resp.Policy + expectedPolicyText := fn() + + equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) + if err != nil { + return fmt.Errorf("Error testing policy equivalence: %s", err) + } + if !equivalent { + return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", + expectedPolicyText, actualPolicyText) + } + + return nil + } +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[2]q +} +`, bucketName, accessPointName) +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_Bucket_Arn(rName string) string { + return fmt.Sprintf(` +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} + +resource "aws_s3control_bucket" "test" { + bucket = %[1]q + outpost_id = data.aws_outposts_outpost.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3control_bucket.test.arn + name = %[1]q + + vpc_configuration { + vpc_id = aws_vpc.test.id + } +} +`, rName) +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_policy(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[1]q + policy = data.aws_iam_policy_document.test.json + + public_access_block_configuration { + block_public_acls = true + block_public_policy = false + ignore_public_acls = true + restrict_public_buckets = false + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" 
"current" {} +data "aws_region" "current" {} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3:GetObjectTagging", + ] + + resources = [ + "arn:${data.aws_partition.current.partition}:s3:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:accesspoint/%[1]s/object/*", + ] + + principals { + type = "AWS" + identifiers = ["*"] + } + } +} +`, rName) +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_policyUpdated(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[1]q + policy = data.aws_iam_policy_document.test.json + + public_access_block_configuration { + block_public_acls = true + block_public_policy = false + ignore_public_acls = true + restrict_public_buckets = false + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} +data "aws_region" "current" {} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3:GetObjectLegalHold", + "s3:GetObjectRetention" + ] + + resources = [ + "arn:${data.aws_partition.current.partition}:s3:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:accesspoint/%[1]s/object/*", + ] + + principals { + type = "AWS" + identifiers = ["*"] + } + } +} +`, rName) +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_noPolicy(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[1]q + + public_access_block_configuration { + block_public_acls = true + block_public_policy = false + ignore_public_acls = true + restrict_public_buckets = false + } +} +`, rName) +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_publicAccessBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[1]q + + public_access_block_configuration { + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false + } +} +`, rName) +} + +func testAccAWSS3ObjectLambdaAccessPointConfig_vpc(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object_lambda_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[1]q + + vpc_configuration { + vpc_id = aws_vpc.test.id + } +} +`, rName) +} From f557d8660eaf964380f60418d90449d592f26e35 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Tue, 25 May 2021 14:19:20 +0200 Subject: [PATCH 166/304] small steps --- resource_aws_s3_object_lambda_access_point.go | 30 +++++++++++++++++-- ..._aws_s3_object_lambda_access_point_test.go | 2 +- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/resource_aws_s3_object_lambda_access_point.go b/resource_aws_s3_object_lambda_access_point.go index c6972b8f72c4..c00be4c2edbe 100644 --- a/resource_aws_s3_object_lambda_access_point.go +++ b/resource_aws_s3_object_lambda_access_point.go @@ -92,8 +92,32 @@ func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { } } -func expandObjectLambdaTransformationConfiguration(tConfig []interface{}) 
*s3control.ObjectLambdaTransformationConfiguration { - return &s3control.ObjectLambdaTransformationConfiguration{} +func expandObjectLambdaContentTransformation(vConfig []interface{}) *s3control.ObjectLambdaContentTransformation { + if len(vConfig) == 0 || vConfig[0] == nil { + return nil + } + + mConfig := vConfig[0].(map[string]interface{}) + + return &s3control.ObjectLambdaContentTransformation{ + AwsLambda: &s3control.AwsLambdaTransformation{ + FunctionArn: aws.String(mConfig["aws_lambda"]["function_arn"]), + FunctionPayload: aws.String(mConfig["aws_lambda"]["function_payload"]), + }, + } + +} + +func expandObjectLambdaTransformationConfiguration(vConfig []interface{}) *s3control.ObjectLambdaTransformationConfiguration { + if len(vConfig) == 0 || vConfig[0] == nil { + return nil + } + mConfig := vConfig[0].(map[string]interface{}) + + return &s3control.ObjectLambdaTransformationConfiguration{ + Actions: expandStringSet(mConfig["actions"].(*schema.Set)), + ContentTransformation: expandObjectLambdaContentTransformation(mConfig["content_transformation"].([]interface{})), + } } func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta interface{}) error { @@ -109,7 +133,7 @@ func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta int AllowedFeatures: expandStringSet(d.Get("allowed_features").(*schema.Set)), CloudWatchMetricsEnabled: aws.Bool(d.Get("cloud_watch_metrics_enabled").(bool)), SupportingAccessPoint: aws.String(d.Get("supporting_access_point").(string)), - TransformationConfigurations: expandObjectLambdaTransformationConfiguration(d.Get("transformation_configurations").([]interface{})), + TransformationConfigurations: expandObjectLambdaTransformationConfiguration(d.Get("transformation_configurations").([]*interface{})), } input := &s3control.CreateAccessPointForObjectLambdaInput{ diff --git a/resource_aws_s3_object_lambda_access_point_test.go b/resource_aws_s3_object_lambda_access_point_test.go index 831f09c13d7a..e31721e7bb4b 100644 --- a/resource_aws_s3_object_lambda_access_point_test.go +++ b/resource_aws_s3_object_lambda_access_point_test.go @@ -487,7 +487,7 @@ func testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(n string, fn func() strin conn := testAccProvider.Meta().(*AWSClient).s3controlconn - resp, err := conn.GetAccessPointPolicyForObjectLambda(&s3control.GetAccessPointForObjectLambdaPolicyInput{ + resp, err := conn.GetAccessPointPolicyForObjectLambda(&s3control.GetAccessPointPolicyForObjectLambdaInput{ AccountId: aws.String(accountId), Name: aws.String(name), }) From b630c47161b59e71ab3bd8bbd1f912709dab70fe Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 16:33:15 -0500 Subject: [PATCH 167/304] Rename resource and associated acceptance test files. 
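Note: the expand helpers introduced in the previous commit ("small steps")
do not compile as committed. mConfig["aws_lambda"] is an interface{}, so it
cannot be indexed with a second key, and d.Get() returns a []interface{},
never a []*interface{}; the next commit comments both helpers out for
exactly this reason. A minimal compiling sketch of the content-transformation
expander, assuming the nested aws_lambda block is a MaxItems:1 list and
reusing the file's existing aws and s3control imports, could look like:

    // Sketch only, not the committed code.
    func expandObjectLambdaContentTransformation(vConfig []interface{}) *s3control.ObjectLambdaContentTransformation {
    	if len(vConfig) == 0 || vConfig[0] == nil {
    		return nil
    	}

    	mConfig := vConfig[0].(map[string]interface{})

    	// A nested block arrives as a single-element []interface{}; unwrap
    	// and type-assert it before reading its keys.
    	mLambda := mConfig["aws_lambda"].([]interface{})[0].(map[string]interface{})

    	return &s3control.ObjectLambdaContentTransformation{
    		AwsLambda: &s3control.AwsLambdaTransformation{
    			FunctionArn:     aws.String(mLambda["function_arn"].(string)),
    			FunctionPayload: aws.String(mLambda["function_payload"].(string)),
    		},
    	}
    }

The caller would likewise need to assert
d.Get("transformation_configurations").([]interface{}) rather than the
[]*interface{} assertion used above.
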
--- .../service/s3control/object_lambda_access_point.go | 0 .../service/s3control/object_lambda_access_point_test.go | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename resource_aws_s3_object_lambda_access_point.go => internal/service/s3control/object_lambda_access_point.go (100%) rename resource_aws_s3_object_lambda_access_point_test.go => internal/service/s3control/object_lambda_access_point_test.go (100%) diff --git a/resource_aws_s3_object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go similarity index 100% rename from resource_aws_s3_object_lambda_access_point.go rename to internal/service/s3control/object_lambda_access_point.go diff --git a/resource_aws_s3_object_lambda_access_point_test.go b/internal/service/s3control/object_lambda_access_point_test.go similarity index 100% rename from resource_aws_s3_object_lambda_access_point_test.go rename to internal/service/s3control/object_lambda_access_point_test.go From 50834faa1b69ace22940ec64b7f49fc300c2b86a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 17:11:15 -0500 Subject: [PATCH 168/304] r/aws_s3control_object_lambda_access_point: First compiling version. --- internal/provider/provider.go | 1 + .../s3control/object_lambda_access_point.go | 248 +++++++----------- 2 files changed, 91 insertions(+), 158 deletions(-) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 3999b1ea157f..f62305f382e1 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1485,6 +1485,7 @@ func Provider() *schema.Provider { "aws_s3control_bucket_policy": s3control.ResourceBucketPolicy(), "aws_s3control_multi_region_access_point": s3control.ResourceMultiRegionAccessPoint(), "aws_s3control_multi_region_access_point_policy": s3control.ResourceMultiRegionAccessPointPolicy(), + "aws_s3control_object_lambda_access_point": s3control.ResourceObjectLambdaAccessPoint(), "aws_s3outposts_endpoint": s3outposts.ResourceEndpoint(), diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go index c00be4c2edbe..d19498bb520a 100644 --- a/internal/service/s3control/object_lambda_access_point.go +++ b/internal/service/s3control/object_lambda_access_point.go @@ -1,6 +1,4 @@ -// https://pkg.go.dev/github.com/aws/aws-sdk-go@v1.38.31/service/s3control?utm_source=gopls -// https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_access_point -package aws +package s3control import ( "fmt" @@ -12,15 +10,18 @@ import ( "github.com/aws/aws-sdk-go/service/s3control" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tfs3control "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3control" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { +func ResourceObjectLambdaAccessPoint() *schema.Resource { return &schema.Resource{ - Create: resourceAwsS3ObjectLambdaAccessPointCreate, - Read: resourceAwsS3ObjectLambdaAccessPointRead, - Update: resourceAwsS3ObjectLambdaAccessPointUpdate, - Delete: resourceAwsS3ObjectLambdaAccessPointDelete, + Create: resourceObjectLambdaAccessPointCreate, + Read: resourceObjectLambdaAccessPointRead, + Update: 
resourceObjectLambdaAccessPointUpdate, + Delete: resourceObjectLambdaAccessPointDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -32,48 +33,54 @@ func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateFunc: validateAwsAccountId, + ValidateFunc: verify.ValidAccountID, }, - "allowed_features": { Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(s3control.ObjectLambdaAllowedFeature_Values(), false), + }, }, - "cloud_watch_metrics_enabled": { Type: schema.TypeBool, Optional: true, }, - "supporting_access_point": { - Type: schema.TypeString, - Optional: false, + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, }, - - "transformation_configurations": { + "transformation_configuration": { Type: schema.TypeList, - Optional: false, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "actions": { - Type: schema.TypeString, - Optional: false, + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(s3control.ObjectLambdaTransformationConfigurationAction_Values(), false), + }, }, - "Content_transformation": { + "content_transformation": { Type: schema.TypeList, - Optional: false, + Required: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "aws_lambda": { - Type: schema.TypeString, + Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "function_arn": { - Type: schema.TypeString, - Optional: false, + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, }, "function_payload": { Type: schema.TypeString, @@ -92,58 +99,30 @@ func resourceAwsS3ObjectLambdaAccessPoint() *schema.Resource { } } -func expandObjectLambdaContentTransformation(vConfig []interface{}) *s3control.ObjectLambdaContentTransformation { - if len(vConfig) == 0 || vConfig[0] == nil { - return nil - } - - mConfig := vConfig[0].(map[string]interface{}) +func resourceObjectLambdaAccessPointCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn - return &s3control.ObjectLambdaContentTransformation{ - AwsLambda: &s3control.AwsLambdaTransformation{ - FunctionArn: aws.String(mConfig["aws_lambda"]["function_arn"]), - FunctionPayload: aws.String(mConfig["aws_lambda"]["function_payload"]), - }, - } - -} - -func expandObjectLambdaTransformationConfiguration(vConfig []interface{}) *s3control.ObjectLambdaTransformationConfiguration { - if len(vConfig) == 0 || vConfig[0] == nil { - return nil - } - mConfig := vConfig[0].(map[string]interface{}) - - return &s3control.ObjectLambdaTransformationConfiguration{ - Actions: expandStringSet(mConfig["actions"].(*schema.Set)), - ContentTransformation: expandObjectLambdaContentTransformation(mConfig["content_transformation"].([]interface{})), - } -} - -func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).s3controlconn - - accountId := meta.(*AWSClient).accountid + accountID := meta.(*conns.AWSClient).AccountID if v, ok := d.GetOk("account_id"); ok { - accountId = v.(string) + accountID = v.(string) } name := d.Get("name").(string) configuration := &s3control.ObjectLambdaConfiguration{ - AllowedFeatures: 
expandStringSet(d.Get("allowed_features").(*schema.Set)), - CloudWatchMetricsEnabled: aws.Bool(d.Get("cloud_watch_metrics_enabled").(bool)), - SupportingAccessPoint: aws.String(d.Get("supporting_access_point").(string)), - TransformationConfigurations: expandObjectLambdaTransformationConfiguration(d.Get("transformation_configurations").([]*interface{})), + AllowedFeatures: flex.ExpandStringSet(d.Get("allowed_features").(*schema.Set)), + CloudWatchMetricsEnabled: aws.Bool(d.Get("cloud_watch_metrics_enabled").(bool)), + SupportingAccessPoint: aws.String(d.Get("supporting_access_point").(string)), + //TransformationConfigurations: expandObjectLambdaTransformationConfiguration(d.Get("transformation_configurations").([]*interface{})), } input := &s3control.CreateAccessPointForObjectLambdaInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), Configuration: configuration, Name: aws.String(name), } log.Printf("[DEBUG] Creating S3 Object Lambda Access Point: %s", input) - output, err := conn.CreateAccessPointForObjectLambda()(input) + output, err := conn.CreateAccessPointForObjectLambda(input) if err != nil { return fmt.Errorf("error creating S3 Control Access Point (%s): %w", name, err) @@ -153,33 +132,20 @@ func resourceAwsS3ObjectLambdaAccessPointCreate(d *schema.ResourceData, meta int return fmt.Errorf("error creating S3 Control Access Point (%s): empty response", name) } - parsedARN, err := arn.Parse(aws.StringValue(output.AccessPointArn)) + parsedARN, err := arn.Parse(aws.StringValue(output.ObjectLambdaAccessPointArn)) if err == nil && strings.HasPrefix(parsedARN.Resource, "outpost/") { - d.SetId(aws.StringValue(output.AccessPointArn)) - name = aws.StringValue(output.AccessPointArn) + d.SetId(aws.StringValue(output.ObjectLambdaAccessPointArn)) + name = aws.StringValue(output.ObjectLambdaAccessPointArn) } else { - d.SetId(fmt.Sprintf("%s:%s", accountId, name)) - } - - if v, ok := d.GetOk("policy"); ok { - log.Printf("[DEBUG] Putting S3 Object Lambda Access Point policy: %s", d.Id()) - _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - Policy: aws.String(v.(string)), - }) - - if err != nil { - return fmt.Errorf("error putting S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) - } + d.SetId(fmt.Sprintf("%s:%s", accountID, name)) } - return resourceAwsS3ObjectLambdaAccessPointRead(d, meta) + return resourceObjectLambdaAccessPointRead(d, meta) } -func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).s3controlconn +func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) if err != nil { @@ -191,7 +157,7 @@ func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta inter Name: aws.String(name), }) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, tfs3control.ErrCodeNoSuchAccessPoint) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { log.Printf("[WARN] S3 Object Lambda Access Point (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -230,8 +196,8 @@ func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta inter } else { accessPointARN := arn.ARN{ AccountID: accountId, - Partition: meta.(*AWSClient).partition, - Region: meta.(*AWSClient).region, + Partition: 
meta.(*conns.AWSClient).Partition, + Region: meta.(*conns.AWSClient).Region, Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), Service: "s3", } @@ -241,7 +207,7 @@ func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta inter } d.Set("account_id", accountId) - d.Set("domain_name", meta.(*AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) + d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) d.Set("name", output.Name) d.Set("network_origin", output.NetworkOrigin) if err := d.Set("public_access_block_configuration", flattenS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { @@ -251,21 +217,6 @@ func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta inter return fmt.Errorf("error setting vpc_configuration: %s", err) } - policyOutput, err := conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - - if isAWSErr(err, "NoSuchAccessPointPolicy", "") { - d.Set("policy", "") - } else { - if err != nil { - return fmt.Errorf("error reading S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) - } - - d.Set("policy", policyOutput.Policy) - } - // Return early since S3 on Outposts cannot have public policies if strings.HasPrefix(name, "arn:") { d.Set("has_public_access_policy", false) @@ -273,62 +224,15 @@ func resourceAwsS3ObjectLambdaAccessPointRead(d *schema.ResourceData, meta inter return nil } - policyStatusOutput, err := conn.GetAccessPointPolicyStatus(&s3control.GetAccessPointPolicyStatusInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - - if isAWSErr(err, "NoSuchAccessPointPolicy", "") { - d.Set("has_public_access_policy", false) - } else { - if err != nil { - return fmt.Errorf("error reading S3 Object Lambda Access Point (%s) policy status: %s", d.Id(), err) - } - - d.Set("has_public_access_policy", policyStatusOutput.PolicyStatus.IsPublic) - } - return nil } -func resourceAwsS3ObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).s3controlconn - - accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) - if err != nil { - return err - } - - if d.HasChange("policy") { - if v, ok := d.GetOk("policy"); ok { - log.Printf("[DEBUG] Putting S3 Object Lambda Access Point policy: %s", d.Id()) - _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - Policy: aws.String(v.(string)), - }) - - if err != nil { - return fmt.Errorf("error putting S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) - } - } else { - log.Printf("[DEBUG] Deleting S3 Object Lambda Access Point policy: %s", d.Id()) - _, err := conn.DeleteAccessPointForObjectLambdaPolicy(&s3control.DeleteAccessPointForObjectLambdaPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - - if err != nil { - return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s) policy: %s", d.Id(), err) - } - } - } - - return resourceAwsS3ObjectLambdaAccessPointRead(d, meta) +func resourceObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceObjectLambdaAccessPointRead(d, meta) } -func resourceAwsS3ObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interface{}) 
error { - conn := meta.(*AWSClient).s3controlconn +func resourceObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) if err != nil { @@ -341,9 +245,9 @@ func resourceAwsS3ObjectLambdaAccessPointDelete(d *schema.ResourceData, meta int Name: aws.String(name), }) - if isAWSErr(err, "NoSuchAccessPoint", "") { - return nil - } + // if isAWSErr(err, "NoSuchAccessPoint", "") { + // return nil + // } if err != nil { return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %s", d.Id(), err) @@ -352,6 +256,34 @@ func resourceAwsS3ObjectLambdaAccessPointDelete(d *schema.ResourceData, meta int return nil } +// func expandObjectLambdaContentTransformation(vConfig []interface{}) *s3control.ObjectLambdaContentTransformation { +// if len(vConfig) == 0 || vConfig[0] == nil { +// return nil +// } + +// mConfig := vConfig[0].(map[string]interface{}) + +// return &s3control.ObjectLambdaContentTransformation{ +// AwsLambda: &s3control.AwsLambdaTransformation{ +// FunctionArn: aws.String(mConfig["aws_lambda"]["function_arn"]), +// FunctionPayload: aws.String(mConfig["aws_lambda"]["function_payload"]), +// }, +// } + +// } + +// func expandObjectLambdaTransformationConfiguration(vConfig []interface{}) *s3control.ObjectLambdaTransformationConfiguration { +// if len(vConfig) == 0 || vConfig[0] == nil { +// return nil +// } +// mConfig := vConfig[0].(map[string]interface{}) + +// return &s3control.ObjectLambdaTransformationConfiguration{ +// Actions: expandStringSet(mConfig["actions"].(*schema.Set)), +// ContentTransformation: expandObjectLambdaContentTransformation(mConfig["content_transformation"].([]interface{})), +// } +// } + // S3ObjectLambdaAccessPointParseId returns the Account ID and Access Point Name (S3) or ARN (S3 on Outposts) func S3ObjectLambdaAccessPointParseId(id string) (string, string, error) { parsedARN, err := arn.Parse(id) From b63516aa791784dab5c8598f40289530048cc6a2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 17:17:41 -0500 Subject: [PATCH 169/304] r/aws_s3control_object_lambda_access_point: First compiling acceptance tests. 
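One caveat with this first pass: testAccObjectLambdaAccessPointConfig_basic
still passes a bucket argument carried over from the original resource,
while the schema in the previous commit marks supporting_access_point and
transformation_configuration as Required, so the basic step seems unlikely
to plan cleanly yet. A sketch of a config helper that exercises the required
arguments, written in the same style as the existing helpers and assuming a
base configuration that also supplies aws_lambda_function.test and its
execution role, might look like:

    // Hypothetical helper; aws_lambda_function.test (and its IAM role) is
    // assumed to come from a shared base configuration.
    func testAccObjectLambdaAccessPointConfig_required(rName string) string {
    	return fmt.Sprintf(`
    resource "aws_s3_bucket" "test" {
      bucket = %[1]q
    }

    resource "aws_s3_access_point" "test" {
      bucket = aws_s3_bucket.test.id
      name   = %[1]q
    }

    resource "aws_s3control_object_lambda_access_point" "test" {
      name                    = %[1]q
      supporting_access_point = aws_s3_access_point.test.arn

      transformation_configuration {
        actions = ["GetObject"]

        content_transformation {
          aws_lambda {
            function_arn = aws_lambda_function.test.arn
          }
        }
      }
    }
    `, rName)
    }
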
--- .../object_lambda_access_point_test.go | 643 ++---------------- 1 file changed, 46 insertions(+), 597 deletions(-) diff --git a/internal/service/s3control/object_lambda_access_point_test.go b/internal/service/s3control/object_lambda_access_point_test.go index e31721e7bb4b..5cb30e301919 100644 --- a/internal/service/s3control/object_lambda_access_point_test.go +++ b/internal/service/s3control/object_lambda_access_point_test.go @@ -1,113 +1,35 @@ -package aws +package s3control_test import ( "fmt" - "log" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - awspolicy "github.com/jen20/awspolicyequivalence" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" ) -func init() { - resource.AddTestSweepers("aws_s3_object_lambda_access_point", &resource.Sweeper{ - Name: "aws_s3_object_lambda_access_point", - F: testSweepS3ObjectLambdaAccessPoints, - }) -} - -func testSweepS3ObjectLambdaAccessPoints(region string) error { - client, err := sharedClientForRegion(region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } - - accountId := client.(*AWSClient).accountid - conn := client.(*AWSClient).s3controlconn - - input := &s3control.ListAccessPointsForObjectLambdaInput{ - AccountId: aws.String(accountId), - } - var sweeperErrs *multierror.Error - - conn.ListAccessPointsForObjectLambdaPages(input, func(page *s3control.ListAccessPointsForObjectLambdaOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, ObjectLambdaAccessPoint := range page.ObjectLambdaAccessPointList { - input := &s3control.DeleteAccessPointForObjectLambdaInput{ - AccountId: aws.String(accountId), - Name: ObjectLambdaAccessPoint.Name, - } - name := aws.StringValue(ObjectLambdaAccessPoint.Name) - - log.Printf("[INFO] Deleting S3 Object Lambda Access Point: %s", name) - _, err := conn.DeleteAccessPointForObjectLambda(input) - - if isAWSErr(err, "NoSuchAccessPoint", "") { - continue - } - - if err != nil { - sweeperErr := fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %w", name, err) - log.Printf("[ERROR] %s", sweeperErr) - sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) - continue - } - } - - return !lastPage - }) - - if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping S3 Object Lambda Access Point sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing S3 Object Lambda Access Points: %w", err) - } - - return sweeperErrs.ErrorOrNil() -} - -func TestAccAWSS3ObjectLambdaAccessPoint_basic(t *testing.T) { +func TestAccS3ControlObjectLambdaAccessPoint_basic(t *testing.T) { var v s3control.GetAccessPointForObjectLambdaOutput - bucketName := acctest.RandomWithPrefix("tf-acc-test") - accessPointName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3control_object_lambda_access_point.test" 
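+	// The harness applies the Config below, runs the Check functions, then
+	// tears everything down and runs CheckDestroy; the second step re-imports
+	// the access point and verifies the imported attributes match state.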
resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), + Config: testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckResourceAttrAccountID(resourceName, "account_id"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", accessPointName)), - resource.TestCheckResourceAttr(resourceName, "bucket", bucketName), - testAccMatchResourceAttrRegionalHostname(resourceName, "domain_name", "s3-accesspoint", regexp.MustCompile(fmt.Sprintf("^%s-\\d{12}", accessPointName))), - resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "name", accessPointName), - resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), - resource.TestCheckResourceAttr(resourceName, "policy", ""), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "true"), - resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), + testAccCheckObjectLambdaAccessPointExists(resourceName, &v), ), }, { @@ -119,23 +41,23 @@ func TestAccAWSS3ObjectLambdaAccessPoint_basic(t *testing.T) { }) } -func TestAccAWSS3ObjectLambdaAccessPoint_disappears(t *testing.T) { +func TestAccS3ControlObjectLambdaAccessPoint_disappears(t *testing.T) { var v s3control.GetAccessPointForObjectLambdaOutput - bucketName := acctest.RandomWithPrefix("tf-acc-test") - accessPointName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3control_object_lambda_access_point.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), + Config: testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, 
&v), - testAccCheckAWSS3ObjectLambdaAccessPointDisappears(resourceName), + testAccCheckObjectLambdaAccessPointExists(resourceName, &v), + acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceObjectLambdaAccessPoint(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -143,24 +65,24 @@ func TestAccAWSS3ObjectLambdaAccessPoint_disappears(t *testing.T) { }) } -func TestAccAWSS3ObjectLambdaAccessPoint_disappears_Bucket(t *testing.T) { +func TestAccS3ControlObjectLambdaAccessPoint_disappears_Bucket(t *testing.T) { var v s3control.GetAccessPointForObjectLambdaOutput - bucketName := acctest.RandomWithPrefix("tf-acc-test") - accessPointName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3control_object_lambda_access_point.test" bucketResourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), + Config: testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckAWSS3DestroyBucket(bucketResourceName), + testAccCheckObjectLambdaAccessPointExists(resourceName, &v), + testAccCheckDestroyBucket(bucketResourceName), ), ExpectNonEmptyPlan: true, }, @@ -168,260 +90,15 @@ func TestAccAWSS3ObjectLambdaAccessPoint_disappears_Bucket(t *testing.T) { }) } -func TestAccAWSS3ObjectLambdaAccessPoint_Bucket_Arn(t *testing.T) { - var v s3control.GetAccessPointForObjectLambdaOutput - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSOutpostsOutposts(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_Bucket_Arn(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckResourceAttrAccountID(resourceName, "account_id"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3-outposts", fmt.Sprintf("outpost/[^/]+/accesspoint/%s", rName)), - resource.TestCheckResourceAttrPair(resourceName, "bucket", "aws_s3control_bucket.test", "arn"), - testAccMatchResourceAttrRegionalHostname(resourceName, "domain_name", "s3-accesspoint", regexp.MustCompile(fmt.Sprintf("^%s-\\d{12}", rName))), - resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "network_origin", "Vpc"), - resource.TestCheckResourceAttr(resourceName, "policy", ""), - resource.TestCheckResourceAttr(resourceName, 
"public_access_block_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "true"), - resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "vpc_configuration.0.vpc_id", "aws_vpc.test", "id"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSS3ObjectLambdaAccessPoint_Policy(t *testing.T) { - var v s3control.GetAccessPointForObjectLambdaOutput - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" - - expectedPolicyText1 := func() string { - return fmt.Sprintf(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Principal": { - "AWS": "*" - }, - "Action": "s3:GetObjectTagging", - "Resource": [ - "arn:%s:s3:%s:%s:accesspoint/%s/object/*" - ] - } - ] -}`, testAccGetPartition(), testAccGetRegion(), testAccGetAccountID(), rName) - } - expectedPolicyText2 := func() string { - return fmt.Sprintf(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Principal": { - "AWS": "*" - }, - "Action": [ - "s3:GetObjectLegalHold", - "s3:GetObjectRetention" - ], - "Resource": [ - "arn:%s:s3:%s:%s:accesspoint/%s/object/*" - ] - } - ] -}`, testAccGetPartition(), testAccGetRegion(), testAccGetAccountID(), rName) - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_policy(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(resourceName, expectedPolicyText1), - testAccCheckResourceAttrAccountID(resourceName, "account_id"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "bucket", rName), - resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "true"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "false"), - resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: 
testAccAWSS3ObjectLambdaAccessPointConfig_policyUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(resourceName, expectedPolicyText2), - ), - }, - { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_noPolicy(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "policy", ""), - ), - }, - }, - }) -} - -func TestAccAWSS3ObjectLambdaAccessPoint_PublicAccessBlockConfiguration(t *testing.T) { - var v s3control.GetAccessPointForObjectLambdaOutput - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_publicAccessBlock(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckResourceAttrAccountID(resourceName, "account_id"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "bucket", rName), - resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), - resource.TestCheckResourceAttr(resourceName, "policy", ""), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "false"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "false"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "false"), - resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSS3ObjectLambdaAccessPoint_VpcConfiguration(t *testing.T) { - var v s3control.GetAccessPointForObjectLambdaOutput - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_s3_object_lambda_access_point.test" - vpcResourceName := "aws_vpc.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheck(t, s3control.EndpointsID), - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3ObjectLambdaAccessPointDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSS3ObjectLambdaAccessPointConfig_vpc(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3ObjectLambdaAccessPointExists(resourceName, &v), - testAccCheckResourceAttrAccountID(resourceName, "account_id"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", rName)), - resource.TestCheckResourceAttr(resourceName, 
"bucket", rName), - resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "network_origin", "VPC"), - resource.TestCheckResourceAttr(resourceName, "policy", ""), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", "true"), - resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", "true"), - resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "vpc_configuration.0.vpc_id", vpcResourceName, "id"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckAWSS3ObjectLambdaAccessPointDisappears(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No S3 Access Point ID is set") - } - - accountId, name, err := s3AccessPointParseId(rs.Primary.ID) - if err != nil { - return err - } - - conn := testAccProvider.Meta().(*AWSClient).s3controlconn - - _, err = conn.DeleteAccessPoint(&s3control.DeleteAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - if err != nil { - return err - } - - return nil - } -} - -func testAccCheckAWSS3ObjectLambdaAccessPointDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).s3controlconn +func testAccCheckObjectLambdaAccessPointDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_object_lambda_access_point" { + if rs.Type != "aws_s3control_object_lambda_access_point" { continue } - accountId, name, err := s3AccessPointParseId(rs.Primary.ID) + accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } @@ -437,7 +114,7 @@ func testAccCheckAWSS3ObjectLambdaAccessPointDestroy(s *terraform.State) error { return nil } -func testAccCheckAWSS3ObjectLambdaAccessPointExists(n string, output *s3control.GetAccessPointForObjectLambdaOutput) resource.TestCheckFunc { +func testAccCheckObjectLambdaAccessPointExists(n string, output *s3control.GetAccessPointForObjectLambdaOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -448,12 +125,12 @@ func testAccCheckAWSS3ObjectLambdaAccessPointExists(n string, output *s3control. 
return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := s3AccessPointParseId(rs.Primary.ID) + accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } - conn := testAccProvider.Meta().(*AWSClient).s3controlconn + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn resp, err := conn.GetAccessPointForObjectLambda(&s3control.GetAccessPointForObjectLambdaInput{ AccountId: aws.String(accountId), @@ -469,243 +146,15 @@ func testAccCheckAWSS3ObjectLambdaAccessPointExists(n string, output *s3control. } } -func testAccCheckAWSS3ObjectLambdaAccessPointHasPolicy(n string, fn func() string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No S3 Access Point ID is set") - } - - accountId, name, err := s3AccessPointParseId(rs.Primary.ID) - if err != nil { - return err - } - - conn := testAccProvider.Meta().(*AWSClient).s3controlconn - - resp, err := conn.GetAccessPointPolicyForObjectLambda(&s3control.GetAccessPointPolicyForObjectLambdaInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - if err != nil { - return err - } - - actualPolicyText := *resp.Policy - expectedPolicyText := fn() - - equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) - if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) - } - if !equivalent { - return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", - expectedPolicyText, actualPolicyText) - } - - return nil - } -} - -func testAccAWSS3ObjectLambdaAccessPointConfig_basic(bucketName, accessPointName string) string { +func testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } -resource "aws_s3_object_lambda_access_point" "test" { +resource "aws_s3control_object_lambda_access_point" "test" { bucket = aws_s3_bucket.test.bucket name = %[2]q } `, bucketName, accessPointName) } - -func testAccAWSS3ObjectLambdaAccessPointConfig_Bucket_Arn(rName string) string { - return fmt.Sprintf(` -data "aws_outposts_outposts" "test" {} - -data "aws_outposts_outpost" "test" { - id = tolist(data.aws_outposts_outposts.test.ids)[0] -} - -resource "aws_s3control_bucket" "test" { - bucket = %[1]q - outpost_id = data.aws_outposts_outpost.test.id -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_s3_object_lambda_access_point" "test" { - bucket = aws_s3control_bucket.test.arn - name = %[1]q - - vpc_configuration { - vpc_id = aws_vpc.test.id - } -} -`, rName) -} - -func testAccAWSS3ObjectLambdaAccessPointConfig_policy(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object_lambda_access_point" "test" { - bucket = aws_s3_bucket.test.bucket - name = %[1]q - policy = data.aws_iam_policy_document.test.json - - public_access_block_configuration { - block_public_acls = true - block_public_policy = false - ignore_public_acls = true - restrict_public_buckets = false - } -} - -data "aws_caller_identity" "current" {} -data "aws_partition" "current" {} -data "aws_region" "current" {} - -data "aws_iam_policy_document" "test" { - statement { - effect = "Allow" - - actions = [ - "s3:GetObjectTagging", - ] - - resources = [ - 
"arn:${data.aws_partition.current.partition}:s3:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:accesspoint/%[1]s/object/*", - ] - - principals { - type = "AWS" - identifiers = ["*"] - } - } -} -`, rName) -} - -func testAccAWSS3ObjectLambdaAccessPointConfig_policyUpdated(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object_lambda_access_point" "test" { - bucket = aws_s3_bucket.test.bucket - name = %[1]q - policy = data.aws_iam_policy_document.test.json - - public_access_block_configuration { - block_public_acls = true - block_public_policy = false - ignore_public_acls = true - restrict_public_buckets = false - } -} - -data "aws_caller_identity" "current" {} -data "aws_partition" "current" {} -data "aws_region" "current" {} - -data "aws_iam_policy_document" "test" { - statement { - effect = "Allow" - - actions = [ - "s3:GetObjectLegalHold", - "s3:GetObjectRetention" - ] - - resources = [ - "arn:${data.aws_partition.current.partition}:s3:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:accesspoint/%[1]s/object/*", - ] - - principals { - type = "AWS" - identifiers = ["*"] - } - } -} -`, rName) -} - -func testAccAWSS3ObjectLambdaAccessPointConfig_noPolicy(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object_lambda_access_point" "test" { - bucket = aws_s3_bucket.test.bucket - name = %[1]q - - public_access_block_configuration { - block_public_acls = true - block_public_policy = false - ignore_public_acls = true - restrict_public_buckets = false - } -} -`, rName) -} - -func testAccAWSS3ObjectLambdaAccessPointConfig_publicAccessBlock(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object_lambda_access_point" "test" { - bucket = aws_s3_bucket.test.bucket - name = %[1]q - - public_access_block_configuration { - block_public_acls = false - block_public_policy = false - ignore_public_acls = false - restrict_public_buckets = false - } -} -`, rName) -} - -func testAccAWSS3ObjectLambdaAccessPointConfig_vpc(rName string) string { - return fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object_lambda_access_point" "test" { - bucket = aws_s3_bucket.test.bucket - name = %[1]q - - vpc_configuration { - vpc_id = aws_vpc.test.id - } -} -`, rName) -} From b718065f9100f11916a8b13a69c2d6bfe5e52d6a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 17:32:22 -0500 Subject: [PATCH 170/304] r/aws_s3control_object_lambda_access_point: Sweeper. Acceptance test output: % make sweep SWEEPARGS=-sweep-run=aws_s3control_object_lambda_access_point SWEEP=us-west-2,us-west-1,us-east-2,us-east-1 WARNING: This will destroy infrastructure. Use only in development accounts. 
go test ./internal/sweep -v -tags=sweep -sweep=us-west-2,us-west-1,us-east-2,us-east-1 -sweep-run=aws_s3control_object_lambda_access_point -timeout 60m 2021/11/14 17:30:10 [DEBUG] Running Sweepers for region (us-west-2): 2021/11/14 17:30:10 [DEBUG] Running Sweeper (aws_s3control_object_lambda_access_point) in region (us-west-2) 2021/11/14 17:30:10 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/14 17:30:10 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:10 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:11 [DEBUG] Completed Sweeper (aws_s3control_object_lambda_access_point) in region (us-west-2) in 1.195820054s 2021/11/14 17:30:11 Completed Sweepers for region (us-west-2) in 1.196017929s 2021/11/14 17:30:11 Sweeper Tests for region (us-west-2) ran successfully: - aws_s3control_object_lambda_access_point 2021/11/14 17:30:11 [DEBUG] Running Sweepers for region (us-west-1): 2021/11/14 17:30:11 [DEBUG] Running Sweeper (aws_s3control_object_lambda_access_point) in region (us-west-1) 2021/11/14 17:30:11 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/14 17:30:11 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:11 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:12 [DEBUG] Completed Sweeper (aws_s3control_object_lambda_access_point) in region (us-west-1) in 1.131984915s 2021/11/14 17:30:12 Completed Sweepers for region (us-west-1) in 1.132046741s 2021/11/14 17:30:12 Sweeper Tests for region (us-west-1) ran successfully: - aws_s3control_object_lambda_access_point 2021/11/14 17:30:12 [DEBUG] Running Sweepers for region (us-east-2): 2021/11/14 17:30:12 [DEBUG] Running Sweeper (aws_s3control_object_lambda_access_point) in region (us-east-2) 2021/11/14 17:30:12 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/14 17:30:12 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:12 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:14 [DEBUG] Completed Sweeper (aws_s3control_object_lambda_access_point) in region (us-east-2) in 1.594099203s 2021/11/14 17:30:14 Completed Sweepers for region (us-east-2) in 1.594175673s 2021/11/14 17:30:14 Sweeper Tests for region (us-east-2) ran successfully: - aws_s3control_object_lambda_access_point 2021/11/14 17:30:14 [DEBUG] Running Sweepers for region (us-east-1): 2021/11/14 17:30:14 [DEBUG] Running Sweeper (aws_s3control_object_lambda_access_point) in region (us-east-1) 2021/11/14 17:30:14 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/14 17:30:14 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:14 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/14 17:30:14 [DEBUG] Completed Sweeper (aws_s3control_object_lambda_access_point) in region (us-east-1) in 644.086847ms 2021/11/14 17:30:14 Completed Sweepers for region (us-east-1) in 644.253217ms 2021/11/14 17:30:14 Sweeper Tests for region (us-east-1) ran successfully: - aws_s3control_object_lambda_access_point ok github.com/hashicorp/terraform-provider-aws/internal/sweep 7.843s --- .../s3control/object_lambda_access_point.go | 8 +-- internal/service/s3control/sweep.go | 51 +++++++++++++++++++ 2 files changed, 55 insertions(+), 4 deletions(-) diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go index d19498bb520a..b09bafbcb173 100644 --- 
a/internal/service/s3control/object_lambda_access_point.go
+++ b/internal/service/s3control/object_lambda_access_point.go
@@ -245,12 +245,12 @@ func resourceObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interfac
 		Name:      aws.String(name),
 	})
 
-	// if isAWSErr(err, "NoSuchAccessPoint", "") {
-	// 	return nil
-	// }
+	if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) {
+		return nil
+	}
 
 	if err != nil {
-		return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %s", d.Id(), err)
+		return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %w", d.Id(), err)
 	}
 
 	return nil
diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go
index c8f712cd7045..a865eb735d19 100644
--- a/internal/service/s3control/sweep.go
+++ b/internal/service/s3control/sweep.go
@@ -25,6 +25,11 @@ func init() {
 		Name: "aws_s3control_multi_region_access_point",
 		F:    sweepMultiRegionAccessPoints,
 	})
+
+	resource.AddTestSweepers("aws_s3control_object_lambda_access_point", &resource.Sweeper{
+		Name: "aws_s3control_object_lambda_access_point",
+		F:    sweepObjectLambdaAccessPoints,
+	})
 }
 
 func sweepAccessPoints(region string) error {
@@ -122,3 +127,49 @@ func sweepMultiRegionAccessPoints(region string) error {
 
 	return nil
 }
+
+func sweepObjectLambdaAccessPoints(region string) error {
+	client, err := sweep.SharedRegionalSweepClient(region)
+	if err != nil {
+		return fmt.Errorf("error getting client: %s", err)
+	}
+	conn := client.(*conns.AWSClient).S3ControlConn
+	accountID := client.(*conns.AWSClient).AccountID
+	input := &s3control.ListAccessPointsForObjectLambdaInput{
+		AccountId: aws.String(accountID),
+	}
+	sweepResources := make([]*sweep.SweepResource, 0)
+
+	err = conn.ListAccessPointsForObjectLambdaPages(input, func(page *s3control.ListAccessPointsForObjectLambdaOutput, lastPage bool) bool {
+		if page == nil {
+			return !lastPage
+		}
+
+		for _, accessPoint := range page.ObjectLambdaAccessPointList {
+			r := ResourceObjectLambdaAccessPoint()
+			d := r.Data(nil)
+			d.SetId(AccessPointCreateResourceID(aws.StringValue(accessPoint.ObjectLambdaAccessPointArn), accountID, aws.StringValue(accessPoint.Name)))
+
+			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
+		}
+
+		return !lastPage
+	})
+
+	if sweep.SkipSweepError(err) {
+		log.Printf("[WARN] Skipping S3 Object Lambda Access Point sweep for %s: %s", region, err)
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error listing S3 Object Lambda Access Points (%s): %w", region, err)
+	}
+
+	err = sweep.SweepOrchestrator(sweepResources)
+
+	if err != nil {
+		return fmt.Errorf("error sweeping S3 Object Lambda Access Points (%s): %w", region, err)
+	}
+
+	return nil
+}

From 9ee63693c35a4dae66fa1913c808fef257116ce0 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Sun, 14 Nov 2021 17:36:41 -0500
Subject: [PATCH 171/304] Add CHANGELOG entry.

---
 .changelog/19294.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/19294.txt

diff --git a/.changelog/19294.txt b/.changelog/19294.txt
new file mode 100644
index 000000000000..a8d4357104e0
--- /dev/null
+++ b/.changelog/19294.txt
@@ -0,0 +1,3 @@
+```release-note:new-resource
+aws_s3control_object_lambda_access_point
+```
\ No newline at end of file

From 518054deb7c45e9f158964779be4bf68f4d98f6b Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Sun, 14 Nov 2021 18:06:37 -0500
Subject: [PATCH 172/304] Add documentation.
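This change also restructures the resource schema so that `allowed_features`,
`cloud_watch_metrics_enabled`, `supporting_access_point` and
`transformation_configuration` are nested under a single required
`configuration` block. A sketch of a configuration exercising the optional
arguments as well (illustrative only: the resource names, the referenced
Lambda function and the payload contents are placeholders, not part of this
change):

    resource "aws_s3control_object_lambda_access_point" "example" {
      name = "example"

      configuration {
        # Optional; valid values per the schema: GetObject-Range, GetObject-PartNumber.
        allowed_features = ["GetObject-Range", "GetObject-PartNumber"]

        # Optional; enables the CloudWatch metrics configuration.
        cloud_watch_metrics_enabled = true

        supporting_access_point = aws_s3_access_point.example.arn

        transformation_configuration {
          actions = ["GetObject"]

          content_transformation {
            aws_lambda {
              function_arn = aws_lambda_function.example.arn

              # Optional supplemental JSON passed to the transformation function.
              function_payload = jsonencode({ compression = "gzip" })
            }
          }
        }
      }
    }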
--- .../s3control/object_lambda_access_point.go | 79 ++++++++++------- ...l_object_lambda_access_point.html.markdown | 88 +++++++++++++++++++ 2 files changed, 135 insertions(+), 32 deletions(-) create mode 100644 website/docs/r/s3control_object_lambda_access_point.html.markdown diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go index b09bafbcb173..2b86ec00e141 100644 --- a/internal/service/s3control/object_lambda_access_point.go +++ b/internal/service/s3control/object_lambda_access_point.go @@ -35,56 +35,66 @@ func ResourceObjectLambdaAccessPoint() *schema.Resource { ForceNew: true, ValidateFunc: verify.ValidAccountID, }, - "allowed_features": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(s3control.ObjectLambdaAllowedFeature_Values(), false), - }, - }, - "cloud_watch_metrics_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "supporting_access_point": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "transformation_configuration": { + "configuration": { Type: schema.TypeList, Required: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "actions": { + "allowed_features": { Type: schema.TypeSet, - Required: true, + Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(s3control.ObjectLambdaTransformationConfigurationAction_Values(), false), + ValidateFunc: validation.StringInSlice(s3control.ObjectLambdaAllowedFeature_Values(), false), }, }, - "content_transformation": { + "cloud_watch_metrics_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "supporting_access_point": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "transformation_configuration": { Type: schema.TypeList, Required: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "aws_lambda": { + "actions": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(s3control.ObjectLambdaTransformationConfigurationAction_Values(), false), + }, + }, + "content_transformation": { Type: schema.TypeList, - Optional: true, + Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "function_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "function_payload": { - Type: schema.TypeString, + "aws_lambda": { + Type: schema.TypeList, Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "function_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "function_payload": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, }, }, }, @@ -95,6 +105,11 @@ func ResourceObjectLambdaAccessPoint() *schema.Resource { }, }, }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, }, } } diff --git a/website/docs/r/s3control_object_lambda_access_point.html.markdown b/website/docs/r/s3control_object_lambda_access_point.html.markdown new file mode 100644 index 000000000000..c654b15271db --- /dev/null +++ b/website/docs/r/s3control_object_lambda_access_point.html.markdown @@ -0,0 +1,88 @@ +--- +subcategory: "S3 Control" +layout: "aws" +page_title: "AWS: aws_s3control_object_lambda_access_point" +description: |- 
+ Provides a resource to manage an S3 Object Lambda Access Point. +--- + +# Resource: aws_s3control_object_lambda_access_point + +Provides a resource to manage an S3 Object Lambda Access Point. +An Object Lambda access point is associated with exactly one [standard access point](s3_access_point.html) and thus one Amazon S3 bucket. + +## Example Usage + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "example" +} + +resource "aws_s3_access_point" "example" { + bucket = aws_s3_bucket.example.id + name = "example" +} + +resource "aws_s3control_object_lambda_access_point" "example" { + name = "example" + + configuration { + supporting_access_point = aws_s3_access_point.example.arn + + transformation_configuration { + actions = ["GetObject"] + + content_transformation { + aws_lambda { + function_arn = aws_lambda_function.example.arn + } + } + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `account_id` - (Optional) The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. +* `configuration` - (Required) A configuration block containing details about the Object Lambda Access Point. See [Configuration](#configuration) below for more details. +* `name` - (Required) The name for this Object Lambda Access Point. + +### Configuration + +The `configuration` block supports the following: + +* `allowed_features` - (Optional) Allowed features. Valid values: `GetObject-Range`, `GetObject-PartNumber`. +* `cloud_watch_metrics_enabled` - (Optional) Whether or not the CloudWatch metrics configuration is enabled. +* `supporting_access_point` - (Required) Standard access point associated with the Object Lambda Access Point. +* `transformation_configuration` - (Required) List of transformation configurations for the Object Lambda Access Point. See [Transformation Configuration](#transformation-configuration) below for more details. + +### Transformation Configuration + +The `transformation_configuration` block supports the following: + +* `actions` - (Required) The actions of an Object Lambda Access Point configuration. Valid values: `GetObject`. +* `content_transformation` - (Required) The content transformation of an Object Lambda Access Point configuration. See [Content Transformation](#content-transformation) below for more details. + +### Content Transformation + +The `content_transformation` block supports the following: + +* `aws_lambda` - (Optional) Configuration for an AWS Lambda function. See [AWS Lambda](#aws-lambda) below for more details. + +### AWS Lambda + +The `aws_lambda` block supports the following: + +* `function_arn` - (Required) The Amazon Resource Name (ARN) of the AWS Lambda function. +* `function_payload` - (Optional) Additional JSON that provides supplemental data to the Lambda function used to transform objects. + +## Import + +Object Lambda Access Points can be imported using the `account_id` and `name`, separated by a colon (`:`), e.g. + +``` +$ terraform import aws_s3control_object_lambda_access_point.example 123456789012:example +``` From d0065a2cc48ba72ae4c3d493d5c107072b24ed93 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 18:43:00 -0500 Subject: [PATCH 173/304] Add 'arn' attribute. 
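The `arn` attribute exposes the Object Lambda Access Point ARN (service
`s3-object-lambda`) so that other configuration can reference it. A minimal
usage sketch, assuming an IAM policy that grants reads through the access
point (resource names are placeholders):

    data "aws_iam_policy_document" "example" {
      statement {
        actions   = ["s3-object-lambda:GetObject"]
        resources = [aws_s3control_object_lambda_access_point.example.arn]
      }
    }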
--- .../r/s3control_object_lambda_access_point.html.markdown | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/website/docs/r/s3control_object_lambda_access_point.html.markdown b/website/docs/r/s3control_object_lambda_access_point.html.markdown index c654b15271db..85b6459cbba1 100644 --- a/website/docs/r/s3control_object_lambda_access_point.html.markdown +++ b/website/docs/r/s3control_object_lambda_access_point.html.markdown @@ -79,6 +79,13 @@ The `aws_lambda` block supports the following: * `function_arn` - (Required) The Amazon Resource Name (ARN) of the AWS Lambda function. * `function_payload` - (Optional) Additional JSON that provides supplemental data to the Lambda function used to transform objects. +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - Amazon Resource Name (ARN) of the Object Lambda Access Point. +* `id` - The AWS account ID and access point name separated by a colon (`:`). + ## Import Object Lambda Access Points can be imported using the `account_id` and `name`, separated by a colon (`:`), e.g. From dc7ae2160b05d6c84a4f92207f72d4b34eb55fbc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 18:43:48 -0500 Subject: [PATCH 174/304] Add and use 'FindObjectLambdaAccessPointByAccountIDAndName'. --- internal/service/s3control/find.go | 26 ++ .../s3control/object_lambda_access_point.go | 255 +++++++++++------- .../object_lambda_access_point_test.go | 4 +- 3 files changed, 179 insertions(+), 106 deletions(-) diff --git a/internal/service/s3control/find.go b/internal/service/s3control/find.go index f9104f11100a..67f987d39286 100644 --- a/internal/service/s3control/find.go +++ b/internal/service/s3control/find.go @@ -103,3 +103,29 @@ func FindMultiRegionAccessPointPolicyDocumentByAccountIDAndName(conn *s3control. 
return output.Policy, nil } + +func FindObjectLambdaAccessPointByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (*s3control.ObjectLambdaConfiguration, error) { + input := &s3control.GetAccessPointConfigurationForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + } + + output, err := conn.GetAccessPointConfigurationForObjectLambda(input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Configuration == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Configuration, nil +} diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go index 2b86ec00e141..1fa9607139b2 100644 --- a/internal/service/s3control/object_lambda_access_point.go +++ b/internal/service/s3control/object_lambda_access_point.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -35,6 +36,10 @@ func ResourceObjectLambdaAccessPoint() *schema.Resource { ForceNew: true, ValidateFunc: verify.ValidAccountID, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, "configuration": { Type: schema.TypeList, Required: true, @@ -60,7 +65,7 @@ func ResourceObjectLambdaAccessPoint() *schema.Resource { ValidateFunc: verify.ValidARN, }, "transformation_configuration": { - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -122,153 +127,212 @@ func resourceObjectLambdaAccessPointCreate(d *schema.ResourceData, meta interfac accountID = v.(string) } name := d.Get("name").(string) + resourceID := ObjectLambdaAccessPointCreateResourceID(accountID, name) - configuration := &s3control.ObjectLambdaConfiguration{ - AllowedFeatures: flex.ExpandStringSet(d.Get("allowed_features").(*schema.Set)), - CloudWatchMetricsEnabled: aws.Bool(d.Get("cloud_watch_metrics_enabled").(bool)), - SupportingAccessPoint: aws.String(d.Get("supporting_access_point").(string)), - //TransformationConfigurations: expandObjectLambdaTransformationConfiguration(d.Get("transformation_configurations").([]*interface{})), + input := &s3control.CreateAccessPointForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), } - input := &s3control.CreateAccessPointForObjectLambdaInput{ - AccountId: aws.String(accountID), - Configuration: configuration, - Name: aws.String(name), + if v, ok := d.GetOk("configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.Configuration = expandObjectLambdaConfiguration(v.([]interface{})[0].(map[string]interface{})) } log.Printf("[DEBUG] Creating S3 Object Lambda Access Point: %s", input) - output, err := conn.CreateAccessPointForObjectLambda(input) + _, err := conn.CreateAccessPointForObjectLambda(input) if err != nil { - return fmt.Errorf("error creating S3 Control Access Point (%s): %w", name, err) + return fmt.Errorf("error creating S3 Object Lambda Access Point (%s): %w", resourceID, err) } - if output == nil { - return fmt.Errorf("error creating S3 Control Access Point (%s): empty response", 
name) + d.SetId(resourceID) + + return resourceObjectLambdaAccessPointRead(d, meta) +} + +func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn + + accountID, name, err := ObjectLambdaAccessPointParseResourceID(d.Id()) + + if err != nil { + return err } - parsedARN, err := arn.Parse(aws.StringValue(output.ObjectLambdaAccessPointArn)) + _, err = FindObjectLambdaAccessPointByAccountIDAndName(conn, accountID, name) - if err == nil && strings.HasPrefix(parsedARN.Resource, "outpost/") { - d.SetId(aws.StringValue(output.ObjectLambdaAccessPointArn)) - name = aws.StringValue(output.ObjectLambdaAccessPointArn) - } else { - d.SetId(fmt.Sprintf("%s:%s", accountID, name)) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] S3 Object Lambda Access Point (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } + if err != nil { + return fmt.Errorf("error reading S3 Object Lambda Access Point (%s): %w", d.Id(), err) + } + + d.Set("account_id", accountID) + arn := arn.ARN{ + Partition: meta.(*conns.AWSClient).Partition, + Service: "s3-object-lambda", + AccountID: accountID, + Resource: fmt.Sprintf("accesspoint/%s", name), + }.String() + d.Set("arn", arn) + d.Set("name", name) + + return nil +} + +func resourceObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { return resourceObjectLambdaAccessPointRead(d, meta) } -func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{}) error { +func resourceObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).S3ControlConn - accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) + accountID, name, err := ObjectLambdaAccessPointParseResourceID(d.Id()) + if err != nil { return err } - output, err := conn.GetAccessPoint(&s3control.GetAccessPointInput{ - AccountId: aws.String(accountId), + log.Printf("[DEBUG] Deleting S3 Object Lambda Access Point: %s", d.Id()) + _, err = conn.DeleteAccessPointForObjectLambda(&s3control.DeleteAccessPointForObjectLambdaInput{ + AccountId: aws.String(accountID), Name: aws.String(name), }) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { - log.Printf("[WARN] S3 Object Lambda Access Point (%s) not found, removing from state", d.Id()) - d.SetId("") + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { return nil } if err != nil { - return fmt.Errorf("error reading S3 Object Lambda Access Point (%s): %w", d.Id(), err) + return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %w", d.Id(), err) } - if output == nil { - return fmt.Errorf("error reading S3 Object Lambda Access Point (%s): empty response", d.Id()) + return nil +} + +const objectLambdaAccessPointResourceIDSeparator = ":" + +func ObjectLambdaAccessPointCreateResourceID(accountID, accessPointName string) string { + parts := []string{accountID, accessPointName} + id := strings.Join(parts, objectLambdaAccessPointResourceIDSeparator) + + return id +} + +func ObjectLambdaAccessPointParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, objectLambdaAccessPointResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil } - if strings.HasPrefix(name, "arn:") { - parsedAccessPointARN, err := arn.Parse(name) + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected account-id%[2]saccess-point-name", id, 
objectLambdaAccessPointResourceIDSeparator) +} - if err != nil { - return fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err) - } +func expandObjectLambdaConfiguration(tfMap map[string]interface{}) *s3control.ObjectLambdaConfiguration { + if tfMap == nil { + return nil + } - bucketARN := arn.ARN{ - AccountID: parsedAccessPointARN.AccountID, - Partition: parsedAccessPointARN.Partition, - Region: parsedAccessPointARN.Region, - Resource: strings.Replace( - parsedAccessPointARN.Resource, - fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), - fmt.Sprintf("bucket/%s", aws.StringValue(output.Bucket)), - 1, - ), - Service: parsedAccessPointARN.Service, - } + apiObject := &s3control.ObjectLambdaConfiguration{} - d.Set("arn", name) - d.Set("bucket", bucketARN.String()) - } else { - accessPointARN := arn.ARN{ - AccountID: accountId, - Partition: meta.(*conns.AWSClient).Partition, - Region: meta.(*conns.AWSClient).Region, - Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), - Service: "s3", - } + if v, ok := tfMap["allowed_features"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AllowedFeatures = flex.ExpandStringSet(v) + } - d.Set("arn", accessPointARN.String()) - d.Set("bucket", output.Bucket) + if v, ok := tfMap["cloud_watch_metrics_enabled"].(bool); ok && v { + apiObject.CloudWatchMetricsEnabled = aws.Bool(v) } - d.Set("account_id", accountId) - d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) - d.Set("name", output.Name) - d.Set("network_origin", output.NetworkOrigin) - if err := d.Set("public_access_block_configuration", flattenS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { - return fmt.Errorf("error setting public_access_block_configuration: %s", err) + if v, ok := tfMap["supporting_access_point"].(string); ok && v != "" { + apiObject.SupportingAccessPoint = aws.String(v) } - if err := d.Set("vpc_configuration", flattenS3ObjectLambdaAccessPointVpcConfiguration(output.VpcConfiguration)); err != nil { - return fmt.Errorf("error setting vpc_configuration: %s", err) + + if v, ok := tfMap["transformation_configuration"].(*schema.Set); ok && v.Len() > 0 { + apiObject.TransformationConfigurations = expandObjectLambdaTransformationConfigurations(v.List()) } - // Return early since S3 on Outposts cannot have public policies - if strings.HasPrefix(name, "arn:") { - d.Set("has_public_access_policy", false) + return apiObject +} +func expandObjectLambdaTransformationConfiguration(tfMap map[string]interface{}) *s3control.ObjectLambdaTransformationConfiguration { + if tfMap == nil { return nil } - return nil + apiObject := &s3control.ObjectLambdaTransformationConfiguration{} + + if v, ok := tfMap["actions"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Actions = flex.ExpandStringSet(v) + } + + if v, ok := tfMap["content_transformation"].([]interface{}); ok && len(v) > 0 { + apiObject.ContentTransformation = expandObjectLambdaContentTransformation(v[0].(map[string]interface{})) + } + + return apiObject } -func resourceObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceObjectLambdaAccessPointRead(d, meta) +func expandObjectLambdaTransformationConfigurations(tfList []interface{}) []*s3control.ObjectLambdaTransformationConfiguration { + if len(tfList) == 0 { + return nil + } + + var apiObjects []*s3control.ObjectLambdaTransformationConfiguration + + for _, 
tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandObjectLambdaTransformationConfiguration(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects } -func resourceObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).S3ControlConn +func expandObjectLambdaContentTransformation(tfMap map[string]interface{}) *s3control.ObjectLambdaContentTransformation { + if tfMap == nil { + return nil + } - accountId, name, err := S3ObjectLambdaAccessPointParseId(d.Id()) - if err != nil { - return err + apiObject := &s3control.ObjectLambdaContentTransformation{} + + if v, ok := tfMap["aws_lambda"].([]interface{}); ok && len(v) > 0 { + apiObject.AwsLambda = expandAwsLambdaTransformation(v[0].(map[string]interface{})) } - log.Printf("[DEBUG] Deleting S3 Object Lambda Access Point: %s", d.Id()) - _, err = conn.DeleteAccessPointForObjectLambda(&s3control.DeleteAccessPointForObjectLambdaInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + return apiObject +} - if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { +func expandAwsLambdaTransformation(tfMap map[string]interface{}) *s3control.AwsLambdaTransformation { + if tfMap == nil { return nil } - if err != nil { - return fmt.Errorf("error deleting S3 Object Lambda Access Point (%s): %w", d.Id(), err) + apiObject := &s3control.AwsLambdaTransformation{} + + if v, ok := tfMap["function_arn"].(string); ok && v != "" { + apiObject.FunctionArn = aws.String(v) } - return nil + if v, ok := tfMap["function_payload"].(string); ok && v != "" { + apiObject.FunctionPayload = aws.String(v) + } + + return apiObject } // func expandObjectLambdaContentTransformation(vConfig []interface{}) *s3control.ObjectLambdaContentTransformation { @@ -299,23 +363,6 @@ func resourceObjectLambdaAccessPointDelete(d *schema.ResourceData, meta interfac // } // } -// S3ObjectLambdaAccessPointParseId returns the Account ID and Access Point Name (S3) or ARN (S3 on Outposts) -func S3ObjectLambdaAccessPointParseId(id string) (string, string, error) { - parsedARN, err := arn.Parse(id) - - if err == nil { - return parsedARN.AccountID, id, nil - } - - parts := strings.SplitN(id, ":", 2) - - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", fmt.Errorf("unexpected format of ID (%s), expected ACCOUNT_ID:NAME", id) - } - - return parts[0], parts[1], nil -} - func expandS3ObjectLambdaAccessPointVpcConfiguration(vConfig []interface{}) *s3control.VpcConfiguration { if len(vConfig) == 0 || vConfig[0] == nil { return nil diff --git a/internal/service/s3control/object_lambda_access_point_test.go b/internal/service/s3control/object_lambda_access_point_test.go index 5cb30e301919..6ecbbe4b509d 100644 --- a/internal/service/s3control/object_lambda_access_point_test.go +++ b/internal/service/s3control/object_lambda_access_point_test.go @@ -98,7 +98,7 @@ func testAccCheckObjectLambdaAccessPointDestroy(s *terraform.State) error { continue } - accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) + accountId, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } @@ -125,7 +125,7 @@ func testAccCheckObjectLambdaAccessPointExists(n string, output *s3control.GetAc return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) + 
accountId, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } From d99e984a9215a28aa3098c77422f793661de0855 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 14 Nov 2021 18:45:07 -0500 Subject: [PATCH 175/304] 'S3ControlConn' -> 'S3ControlConnForMRAP'. --- internal/service/s3control/multi_region_access_point.go | 8 ++++---- .../service/s3control/multi_region_access_point_policy.go | 6 +++--- .../s3control/multi_region_access_point_policy_test.go | 2 +- .../service/s3control/multi_region_access_point_test.go | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/service/s3control/multi_region_access_point.go b/internal/service/s3control/multi_region_access_point.go index 829be111286a..2ff2ccc78256 100644 --- a/internal/service/s3control/multi_region_access_point.go +++ b/internal/service/s3control/multi_region_access_point.go @@ -132,7 +132,7 @@ func ResourceMultiRegionAccessPoint() *schema.Resource { } func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface{}) error { - conn, err := S3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConnForMRAP(meta.(*conns.AWSClient)) if err != nil { return err @@ -172,7 +172,7 @@ func resourceMultiRegionAccessPointCreate(d *schema.ResourceData, meta interface } func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{}) error { - conn, err := S3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConnForMRAP(meta.(*conns.AWSClient)) if err != nil { return err @@ -217,7 +217,7 @@ func resourceMultiRegionAccessPointRead(d *schema.ResourceData, meta interface{} } func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface{}) error { - conn, err := S3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConnForMRAP(meta.(*conns.AWSClient)) if err != nil { return err @@ -254,7 +254,7 @@ func resourceMultiRegionAccessPointDelete(d *schema.ResourceData, meta interface return nil } -func S3ControlConn(client *conns.AWSClient) (*s3control.S3Control, error) { +func S3ControlConnForMRAP(client *conns.AWSClient) (*s3control.S3Control, error) { originalConn := client.S3ControlConn // All Multi-Region Access Point actions are routed to the US West (Oregon) Region. 
region := endpoints.UsWest2RegionID diff --git a/internal/service/s3control/multi_region_access_point_policy.go b/internal/service/s3control/multi_region_access_point_policy.go index 704a87547005..03ce1816e15f 100644 --- a/internal/service/s3control/multi_region_access_point_policy.go +++ b/internal/service/s3control/multi_region_access_point_policy.go @@ -73,7 +73,7 @@ func ResourceMultiRegionAccessPointPolicy() *schema.Resource { } func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta interface{}) error { - conn, err := S3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConnForMRAP(meta.(*conns.AWSClient)) if err != nil { return err @@ -113,7 +113,7 @@ func resourceMultiRegionAccessPointPolicyCreate(d *schema.ResourceData, meta int } func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta interface{}) error { - conn, err := S3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConnForMRAP(meta.(*conns.AWSClient)) if err != nil { return err @@ -160,7 +160,7 @@ func resourceMultiRegionAccessPointPolicyRead(d *schema.ResourceData, meta inter } func resourceMultiRegionAccessPointPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - conn, err := S3ControlConn(meta.(*conns.AWSClient)) + conn, err := S3ControlConnForMRAP(meta.(*conns.AWSClient)) if err != nil { return err diff --git a/internal/service/s3control/multi_region_access_point_policy_test.go b/internal/service/s3control/multi_region_access_point_policy_test.go index 4b90b3d29f19..c7ee4d0ede93 100644 --- a/internal/service/s3control/multi_region_access_point_policy_test.go +++ b/internal/service/s3control/multi_region_access_point_policy_test.go @@ -184,7 +184,7 @@ func testAccCheckMultiRegionAccessPointPolicyExists(n string, v *s3control.Multi return err } - conn, err := tfs3control.S3ControlConn(acctest.Provider.Meta().(*conns.AWSClient)) + conn, err := tfs3control.S3ControlConnForMRAP(acctest.Provider.Meta().(*conns.AWSClient)) if err != nil { return err diff --git a/internal/service/s3control/multi_region_access_point_test.go b/internal/service/s3control/multi_region_access_point_test.go index a0aa5b83a4d4..85cbc878fde1 100644 --- a/internal/service/s3control/multi_region_access_point_test.go +++ b/internal/service/s3control/multi_region_access_point_test.go @@ -215,7 +215,7 @@ func TestAccS3ControlMultiRegionAccessPoint_threeRegions(t *testing.T) { } func testAccCheckMultiRegionAccessPointDestroy(s *terraform.State) error { - conn, err := tfs3control.S3ControlConn(acctest.Provider.Meta().(*conns.AWSClient)) + conn, err := tfs3control.S3ControlConnForMRAP(acctest.Provider.Meta().(*conns.AWSClient)) if err != nil { return err @@ -265,7 +265,7 @@ func testAccCheckMultiRegionAccessPointExists(n string, v *s3control.MultiRegion return err } - conn, err := tfs3control.S3ControlConn(acctest.Provider.Meta().(*conns.AWSClient)) + conn, err := tfs3control.S3ControlConnForMRAP(acctest.Provider.Meta().(*conns.AWSClient)) if err != nil { return err From 5cc4414f62ff436f7f8c075331e0615f51ced4a7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 15 Nov 2021 08:41:52 -0500 Subject: [PATCH 176/304] r/aws_s3control_object_lambda_access_point: Add flatteners. 
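The flatteners are the inverse of the expanders added in the create path:
Read can now map the API's ObjectLambdaConfiguration back onto the nested
`configuration` block in state. Roughly, each API field round-trips to the
argument noted in the comments below (an illustrative sketch, all values
are placeholders):

    configuration {
      allowed_features            = ["GetObject-Range"] # AllowedFeatures
      cloud_watch_metrics_enabled = true                # CloudWatchMetricsEnabled
      supporting_access_point     = "arn..."            # SupportingAccessPoint

      transformation_configuration {                    # TransformationConfigurations
        actions = ["GetObject"]                         # Actions

        content_transformation {                        # ContentTransformation
          aws_lambda {                                  # AwsLambda
            function_arn     = "arn..."                 # FunctionArn
            function_payload = "{}"                     # FunctionPayload
          }
        }
      }
    }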
--- .../s3control/object_lambda_access_point.go | 121 ++++++++++-------- 1 file changed, 70 insertions(+), 51 deletions(-) diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go index 1fa9607139b2..a0236ab24d19 100644 --- a/internal/service/s3control/object_lambda_access_point.go +++ b/internal/service/s3control/object_lambda_access_point.go @@ -159,7 +159,7 @@ func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{ return err } - _, err = FindObjectLambdaAccessPointByAccountIDAndName(conn, accountID, name) + output, err := FindObjectLambdaAccessPointByAccountIDAndName(conn, accountID, name) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Object Lambda Access Point (%s) not found, removing from state", d.Id()) @@ -179,6 +179,9 @@ func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{ Resource: fmt.Sprintf("accesspoint/%s", name), }.String() d.Set("arn", arn) + if err := d.Set("configuration", []interface{}{flattenObjectLambdaConfiguration(output)}); err != nil { + return fmt.Errorf("error setting configuration: %w", err) + } d.Set("name", name) return nil @@ -335,80 +338,96 @@ func expandAwsLambdaTransformation(tfMap map[string]interface{}) *s3control.AwsL return apiObject } -// func expandObjectLambdaContentTransformation(vConfig []interface{}) *s3control.ObjectLambdaContentTransformation { -// if len(vConfig) == 0 || vConfig[0] == nil { -// return nil -// } +func flattenObjectLambdaConfiguration(apiObject *s3control.ObjectLambdaConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } -// mConfig := vConfig[0].(map[string]interface{}) + tfMap := map[string]interface{}{} -// return &s3control.ObjectLambdaContentTransformation{ -// AwsLambda: &s3control.AwsLambdaTransformation{ -// FunctionArn: aws.String(mConfig["aws_lambda"]["function_arn"]), -// FunctionPayload: aws.String(mConfig["aws_lambda"]["function_payload"]), -// }, -// } + if v := apiObject.AllowedFeatures; v != nil { + tfMap["allowed_features"] = aws.StringValueSlice(v) + } -// } + if v := apiObject.CloudWatchMetricsEnabled; v != nil { + tfMap["cloud_watch_metrics_enabled"] = aws.BoolValue(v) + } + + if v := apiObject.SupportingAccessPoint; v != nil { + tfMap["supporting_access_point"] = aws.StringValue(v) + } -// func expandObjectLambdaTransformationConfiguration(vConfig []interface{}) *s3control.ObjectLambdaTransformationConfiguration { -// if len(vConfig) == 0 || vConfig[0] == nil { -// return nil -// } -// mConfig := vConfig[0].(map[string]interface{}) + if v := apiObject.TransformationConfigurations; v != nil { + tfMap["transformation_configuration"] = flattenObjectLambdaTransformationConfigurations(v) + } -// return &s3control.ObjectLambdaTransformationConfiguration{ -// Actions: expandStringSet(mConfig["actions"].(*schema.Set)), -// ContentTransformation: expandObjectLambdaContentTransformation(mConfig["content_transformation"].([]interface{})), -// } -// } + return tfMap +} -func expandS3ObjectLambdaAccessPointVpcConfiguration(vConfig []interface{}) *s3control.VpcConfiguration { - if len(vConfig) == 0 || vConfig[0] == nil { +func flattenObjectLambdaTransformationConfiguration(apiObject *s3control.ObjectLambdaTransformationConfiguration) map[string]interface{} { + if apiObject == nil { return nil } - mConfig := vConfig[0].(map[string]interface{}) + tfMap := map[string]interface{}{} + + if v := apiObject.Actions; v != nil { + tfMap["actions"] = 
aws.StringValueSlice(v) + } - return &s3control.VpcConfiguration{ - VpcId: aws.String(mConfig["vpc_id"].(string)), + if v := apiObject.ContentTransformation; v != nil { + tfMap["content_transformation"] = []interface{}{flattenObjectLambdaContentTransformation(v)} } + + return tfMap } -func flattenS3ObjectLambdaAccessPointVpcConfiguration(config *s3control.VpcConfiguration) []interface{} { - if config == nil { - return []interface{}{} +func flattenObjectLambdaTransformationConfigurations(apiObjects []*s3control.ObjectLambdaTransformationConfiguration) []interface{} { + if len(apiObjects) == 0 { + return nil } - return []interface{}{map[string]interface{}{ - "vpc_id": aws.StringValue(config.VpcId), - }} + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + tfList = append(tfList, flattenObjectLambdaTransformationConfiguration(apiObject)) + } + + return tfList } -func expandS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(vConfig []interface{}) *s3control.PublicAccessBlockConfiguration { - if len(vConfig) == 0 || vConfig[0] == nil { +func flattenObjectLambdaContentTransformation(apiObject *s3control.ObjectLambdaContentTransformation) map[string]interface{} { + if apiObject == nil { return nil } - mConfig := vConfig[0].(map[string]interface{}) + tfMap := map[string]interface{}{} - return &s3control.PublicAccessBlockConfiguration{ - BlockPublicAcls: aws.Bool(mConfig["block_public_acls"].(bool)), - BlockPublicPolicy: aws.Bool(mConfig["block_public_policy"].(bool)), - IgnorePublicAcls: aws.Bool(mConfig["ignore_public_acls"].(bool)), - RestrictPublicBuckets: aws.Bool(mConfig["restrict_public_buckets"].(bool)), + if v := apiObject.AwsLambda; v != nil { + tfMap["aws_lambda"] = []interface{}{flattenAwsLambdaTransformation(v)} } + + return tfMap } -func flattenS3ObjectLambdaAccessPointPublicAccessBlockConfiguration(config *s3control.PublicAccessBlockConfiguration) []interface{} { - if config == nil { - return []interface{}{} +func flattenAwsLambdaTransformation(apiObject *s3control.AwsLambdaTransformation) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.FunctionArn; v != nil { + tfMap["function_arn"] = aws.StringValue(v) + } + + if v := apiObject.FunctionPayload; v != nil { + tfMap["function_payload"] = aws.StringValue(v) } - return []interface{}{map[string]interface{}{ - "block_public_acls": aws.BoolValue(config.BlockPublicAcls), - "block_public_policy": aws.BoolValue(config.BlockPublicPolicy), - "ignore_public_acls": aws.BoolValue(config.IgnorePublicAcls), - "restrict_public_buckets": aws.BoolValue(config.RestrictPublicBuckets), - }} + return tfMap } From fb7ad6ba9b40e596f062428a19662e29e17d326a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 15 Nov 2021 09:20:55 -0500 Subject: [PATCH 177/304] r/aws_s3control_object_lambda_access_point: First passing acceptance test. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlObjectLambdaAccessPoint_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... 
-v -count 1 -parallel 20 -run=TestAccS3ControlObjectLambdaAccessPoint_basic -timeout 180m === RUN TestAccS3ControlObjectLambdaAccessPoint_basic === PAUSE TestAccS3ControlObjectLambdaAccessPoint_basic === CONT TestAccS3ControlObjectLambdaAccessPoint_basic --- PASS: TestAccS3ControlObjectLambdaAccessPoint_basic (54.55s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 58.420s --- .../s3control/object_lambda_access_point.go | 26 ++++- .../object_lambda_access_point_test.go | 98 ++++++++++++------ .../s3control/test-fixtures/lambdatest.zip | Bin 0 -> 342 bytes ...l_object_lambda_access_point.html.markdown | 2 +- 4 files changed, 91 insertions(+), 35 deletions(-) create mode 100644 internal/service/s3control/test-fixtures/lambdatest.zip diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go index a0236ab24d19..bed278c008b6 100644 --- a/internal/service/s3control/object_lambda_access_point.go +++ b/internal/service/s3control/object_lambda_access_point.go @@ -85,7 +85,7 @@ func ResourceObjectLambdaAccessPoint() *schema.Resource { Schema: map[string]*schema.Schema{ "aws_lambda": { Type: schema.TypeList, - Optional: true, + Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -188,6 +188,30 @@ func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{ } func resourceObjectLambdaAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn + + accountID, name, err := ObjectLambdaAccessPointParseResourceID(d.Id()) + + if err != nil { + return err + } + + input := &s3control.PutAccessPointConfigurationForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + } + + if v, ok := d.GetOk("configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.Configuration = expandObjectLambdaConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + + log.Printf("[DEBUG] Updating S3 Object Lambda Access Point: %s", input) + _, err = conn.PutAccessPointConfigurationForObjectLambda(input) + + if err != nil { + return fmt.Errorf("error updating S3 Object Lambda Access Point (%s): %w", d.Id(), err) + } + return resourceObjectLambdaAccessPointRead(d, meta) } diff --git a/internal/service/s3control/object_lambda_access_point_test.go b/internal/service/s3control/object_lambda_access_point_test.go index 6ecbbe4b509d..67568ccc6fdb 100644 --- a/internal/service/s3control/object_lambda_access_point_test.go +++ b/internal/service/s3control/object_lambda_access_point_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -12,12 +11,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccS3ControlObjectLambdaAccessPoint_basic(t *testing.T) { - var v s3control.GetAccessPointForObjectLambdaOutput - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var v s3control.ObjectLambdaConfiguration + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_s3control_object_lambda_access_point.test"

resource.ParallelTest(t, resource.TestCase{
@@ -27,7 +26,7 @@ func TestAccS3ControlObjectLambdaAccessPoint_basic(t *testing.T) {
CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy,
Steps: []resource.TestStep{
{
- Config: testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName),
+ Config: testAccObjectLambdaAccessPointConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckObjectLambdaAccessPointExists(resourceName, &v),
),
@@ -42,9 +41,8 @@ func TestAccS3ControlObjectLambdaAccessPoint_basic(t *testing.T) {
}

func TestAccS3ControlObjectLambdaAccessPoint_disappears(t *testing.T) {
- var v s3control.GetAccessPointForObjectLambdaOutput
- bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
- accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+ var v s3control.ObjectLambdaConfiguration
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_s3control_object_lambda_access_point.test"

resource.ParallelTest(t, resource.TestCase{
@@ -54,7 +52,7 @@ func TestAccS3ControlObjectLambdaAccessPoint_disappears(t *testing.T) {
CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy,
Steps: []resource.TestStep{
{
- Config: testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName),
+ Config: testAccObjectLambdaAccessPointConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckObjectLambdaAccessPointExists(resourceName, &v),
acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceObjectLambdaAccessPoint(), resourceName),
@@ -66,9 +64,8 @@ func TestAccS3ControlObjectLambdaAccessPoint_disappears(t *testing.T) {
}

func TestAccS3ControlObjectLambdaAccessPoint_disappears_Bucket(t *testing.T) {
- var v s3control.GetAccessPointForObjectLambdaOutput
- bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
- accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+ var v s3control.ObjectLambdaConfiguration
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_s3control_object_lambda_access_point.test"
bucketResourceName := "aws_s3_bucket.test"

resource.ParallelTest(t, resource.TestCase{
@@ -79,7 +76,7 @@
CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy,
Steps: []resource.TestStep{
{
- Config: testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName),
+ Config: testAccObjectLambdaAccessPointConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckObjectLambdaAccessPointExists(resourceName, &v),
testAccCheckDestroyBucket(bucketResourceName),
@@ -98,23 +95,29 @@ func testAccCheckObjectLambdaAccessPointDestroy(s *terraform.State) error {
continue
}

- accountId, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID)
+ accountID, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID)
+
if err != nil {
return err
}

- _, err = conn.GetAccessPointForObjectLambda(&s3control.GetAccessPointForObjectLambdaInput{
- AccountId: aws.String(accountId),
- Name: aws.String(name),
- })
- if err == nil {
- return fmt.Errorf("S3 Access Point still exists")
+ _, err = tfs3control.FindObjectLambdaAccessPointByAccountIDAndName(conn, accountID, name)
+
+ if tfresource.NotFound(err) {
+ continue
+ }
+
+ if err != nil {
+ return err
}
+
+ return fmt.Errorf("S3 Object Lambda Access Point %s still exists", rs.Primary.ID)
}
+
return nil
}

-func testAccCheckObjectLambdaAccessPointExists(n string, output *s3control.GetAccessPointForObjectLambdaOutput) resource.TestCheckFunc {
+func testAccCheckObjectLambdaAccessPointExists(n string, v *s3control.ObjectLambdaConfiguration) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@@ -122,39 +125,68 @@ func testAccCheckObjectLambdaAccessPointExists(n string, output *s3control.GetAc
}

if rs.Primary.ID == "" {
- return fmt.Errorf("No S3 Access Point ID is set")
+ return fmt.Errorf("No S3 Object Lambda Access Point is set")
}

- accountId, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID)
+ accountID, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID)
+
if err != nil {
return err
}

conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn

- resp, err := conn.GetAccessPointForObjectLambda(&s3control.GetAccessPointForObjectLambdaInput{
- AccountId: aws.String(accountId),
- Name: aws.String(name),
- })
+ output, err := tfs3control.FindObjectLambdaAccessPointByAccountIDAndName(conn, accountID, name)
+
if err != nil {
return err
}

- *output = *resp
+ *v = *output

return nil
}
}

-func testAccObjectLambdaAccessPointConfig_basic(bucketName, accessPointName string) string {
- return fmt.Sprintf(`
+func testAccObjectLambdaAccessPointBaseConfig(rName string) string {
+ return acctest.ConfigCompose(acctest.ConfigLambdaBase(rName, rName, rName), fmt.Sprintf(`
+resource "aws_lambda_function" "test" {
+ filename = "test-fixtures/lambdatest.zip"
+ function_name = %[1]q
+ role = aws_iam_role.iam_for_lambda.arn
+ handler = "index.handler"
+ runtime = "nodejs14.x"
+}
+`, rName))
+}
+
+func testAccObjectLambdaAccessPointConfig(rName string) string {
+ return acctest.ConfigCompose(testAccObjectLambdaAccessPointBaseConfig(rName), fmt.Sprintf(`
resource "aws_s3_bucket" "test" {
bucket = %[1]q
}

+resource "aws_s3_access_point" "test" {
+ bucket = aws_s3_bucket.test.id
+ name = %[1]q
+}
+
resource "aws_s3control_object_lambda_access_point" "test" {
- bucket = aws_s3_bucket.test.bucket
- name = %[2]q
+ name = %[1]q
+
+ configuration {
+ supporting_access_point = aws_s3_access_point.test.arn
+
+ transformation_configuration {
+ actions = ["GetObject"]
+
+ content_transformation {
+ aws_lambda {
+ function_arn = aws_lambda_function.test.arn
+ }
+ }
+ }
+ }
}
-`, bucketName, accessPointName)
+`, rName))
}
diff --git a/internal/service/s3control/test-fixtures/lambdatest.zip b/internal/service/s3control/test-fixtures/lambdatest.zip
new file mode 100644
index 0000000000000000000000000000000000000000..5c636e955b2cccd992ac213993798acfdc39d6aa
GIT binary patch
literal 342
zcmWIWW@Zs#U|`^2_)xpcjj3pP&N2{>k%57iL53kGF*hkCu_U#)L@%p2G=!6ZndL`H
zXdn=mR&X;gvU~-q18Xlm&k&i8#+QN6XCT<*-b`*@ggZ%;WfQT_nWzO%+$*O&s=9G|AR
z&f>yR^AA6>@7sK}irLaRiOHq^)$z9dpQ3v8avCCzNC$W`GRZOH@~#BX;|vTyA2BRx
d1hLRO&kFH8n#TjYS=m5}8G$euNWTSf7yyUqbSVG;

literal 0
HcmV?d00001

diff --git a/website/docs/r/s3control_object_lambda_access_point.html.markdown b/website/docs/r/s3control_object_lambda_access_point.html.markdown
index 85b6459cbba1..c1e069d45e15 100644
--- a/website/docs/r/s3control_object_lambda_access_point.html.markdown
+++ b/website/docs/r/s3control_object_lambda_access_point.html.markdown
@@ -70,7 +70,7 @@ The `transformation_configuration` block supports the following:

The `content_transformation` block supports the following:

-* `aws_lambda` - (Optional) Configuration for an AWS Lambda function. See [AWS Lambda](#aws-lambda) below for more details.
+* `aws_lambda` - (Required) Configuration for an AWS Lambda function. See [AWS Lambda](#aws-lambda) below for more details.

### AWS Lambda

From 6b07a24c3b9da33ffbaac46a71cde0ce5d15080d Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Mon, 15 Nov 2021 15:52:19 +0000
Subject: [PATCH 178/304] Update CHANGELOG.md for #21760

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 34a501a24916..210379908798 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,12 +2,15 @@

FEATURES:

+* **New Data Source:** `aws_emr_release_labels` ([#21767](https://github.com/hashicorp/terraform-provider-aws/issues/21767))
* **New Resource:** `aws_s3control_multi_region_access_point` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060))
* **New Resource:** `aws_s3control_multi_region_access_point_policy` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060))

ENHANCEMENTS:

* resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702))
+* resource/aws_neptune_cluster: Support in-place update of `engine_version` ([#21760](https://github.com/hashicorp/terraform-provider-aws/issues/21760))
+* resource/aws_sagemaker_endpoint: Add `deployment_config` argument ([#21765](https://github.com/hashicorp/terraform-provider-aws/issues/21765))

BUG FIXES:

From 2198b511d54ccd0fff83a4f9142f03a38902469d Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 10:54:44 -0500
Subject: [PATCH 179/304] Add CHANGELOG entry.

---
 .changelog/21769.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/21769.txt

diff --git a/.changelog/21769.txt b/.changelog/21769.txt
new file mode 100644
index 000000000000..1a54217e5fb5
--- /dev/null
+++ b/.changelog/21769.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support of resource tagging
+```
\ No newline at end of file

From 3bc126404f5446a140ac00395e117768cbe6dee1 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 10:57:50 -0500
Subject: [PATCH 180/304] Fix terrafmt errors.

---
 internal/service/iot/thing_type_test.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/internal/service/iot/thing_type_test.go b/internal/service/iot/thing_type_test.go
index 033259a8d479..72b1c64205d0 100644
--- a/internal/service/iot/thing_type_test.go
+++ b/internal/service/iot/thing_type_test.go
@@ -215,8 +215,8 @@ resource "aws_iot_thing_type" "foo" {
func testAccIoTThingTypeTags1(rName, tagKey1, tagValue1 string) string {
return fmt.Sprintf(`
resource "aws_iot_thing_type" "foo" {
- name = "tf_acc_iot_thing_type_%[1]s"
- deprecated = false
+ name = "tf_acc_iot_thing_type_%[1]s"
+ deprecated = false

tags = {
%[2]q = %[3]q
@@ -228,8 +228,8 @@ func testAccIoTThingTypeTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string {
return fmt.Sprintf(`
resource "aws_iot_thing_type" "foo" {
- name = "tf_acc_iot_thing_type_%[1]s"
- deprecated = false
+ name = "tf_acc_iot_thing_type_%[1]s"
+ deprecated = false

tags = {
%[2]q = %[3]q

From 84a6d94e697c5cf69ea99310ca22a0cd228a73cb Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 11:04:50 -0500
Subject: [PATCH 181/304] Update 21769.txt

---
 .changelog/21769.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.changelog/21769.txt b/.changelog/21769.txt
index 1a54217e5fb5..fb7ac919c649 100644
--- a/.changelog/21769.txt
+++ b/.changelog/21769.txt
@@ -1,3 +1,3 @@
 ```release-note:enhancement
-resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support of resource tagging
-```
\ No newline at end of file
+resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support resource tagging
+```

From bf0a696b374db4093354ade6ff55117593e3e465 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 11:10:07 -0500
Subject: [PATCH 182/304] r/aws_s3control_object_lambda_access_point: Acceptance tests passing.

Acceptance test output:

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlObjectLambdaAccessPoint_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlObjectLambdaAccessPoint_ -timeout 180m
=== RUN TestAccS3ControlObjectLambdaAccessPoint_basic
=== PAUSE TestAccS3ControlObjectLambdaAccessPoint_basic
=== RUN TestAccS3ControlObjectLambdaAccessPoint_disappears
=== PAUSE TestAccS3ControlObjectLambdaAccessPoint_disappears
=== CONT TestAccS3ControlObjectLambdaAccessPoint_basic
=== CONT TestAccS3ControlObjectLambdaAccessPoint_disappears
--- PASS: TestAccS3ControlObjectLambdaAccessPoint_basic (42.28s)
--- PASS: TestAccS3ControlObjectLambdaAccessPoint_disappears (47.13s)
PASS
ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 50.624s

---
 internal/service/s3/sweep.go | 1 +
 .../s3control/object_lambda_access_point.go | 2 +
 .../object_lambda_access_point_test.go | 41 ++++++++-----------
 internal/service/s3control/sweep.go | 3 ++
 4 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go
index bb92ddd11917..a0e0b26ac83b 100644
--- a/internal/service/s3/sweep.go
+++ b/internal/service/s3/sweep.go
@@ -33,6 +33,7 @@ func init() {
Dependencies: []string{
"aws_s3_access_point",
"aws_s3_bucket_object",
+ "aws_s3control_multi_region_access_point",
},
})
}
diff --git a/internal/service/s3control/object_lambda_access_point.go b/internal/service/s3control/object_lambda_access_point.go
index bed278c008b6..b00a55445367 100644
--- a/internal/service/s3control/object_lambda_access_point.go
+++ b/internal/service/s3control/object_lambda_access_point.go
@@ -172,9 +172,11 @@ func resourceObjectLambdaAccessPointRead(d *schema.ResourceData, meta interface{
}

d.Set("account_id", accountID)
+ // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3objectlambda.html#amazons3objectlambda-resources-for-iam-policies.
arn := arn.ARN{
Partition: meta.(*conns.AWSClient).Partition,
Service: "s3-object-lambda",
+ Region: meta.(*conns.AWSClient).Region,
AccountID: accountID,
Resource: fmt.Sprintf("accesspoint/%s", name),
}.String()
diff --git a/internal/service/s3control/object_lambda_access_point_test.go b/internal/service/s3control/object_lambda_access_point_test.go
index 67568ccc6fdb..ab3135eb9cb6 100644
--- a/internal/service/s3control/object_lambda_access_point_test.go
+++ b/internal/service/s3control/object_lambda_access_point_test.go
@@ -18,6 +18,8 @@ func TestAccS3ControlObjectLambdaAccessPoint_basic(t *testing.T) {
var v s3control.ObjectLambdaConfiguration
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
resourceName := "aws_s3control_object_lambda_access_point.test"
+ accessPointResourceName := "aws_s3_access_point.test"
+ lambdaFunctionResourceName := "aws_lambda_function.test"

resource.ParallelTest(t, resource.TestCase{
@@ -29,6 +31,21 @@ func TestAccS3ControlObjectLambdaAccessPoint_basic(t *testing.T) {
Config: testAccObjectLambdaAccessPointConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckObjectLambdaAccessPointExists(resourceName, &v),
+ acctest.CheckResourceAttrAccountID(resourceName, "account_id"),
+ acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "s3-object-lambda", fmt.Sprintf("accesspoint/%s", rName)),
+ resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"),
+ resource.TestCheckResourceAttr(resourceName, "configuration.0.allowed_features.#", "0"),
+ resource.TestCheckResourceAttr(resourceName, "configuration.0.cloud_watch_metrics_enabled", "false"),
+ resource.TestCheckResourceAttrPair(resourceName, "configuration.0.supporting_access_point", accessPointResourceName, "arn"),
+ resource.TestCheckResourceAttr(resourceName, "configuration.0.transformation_configuration.#", "1"),
+ resource.TestCheckTypeSetElemNestedAttrs(resourceName, "configuration.0.transformation_configuration.*", map[string]string{
+ "actions.#": "1",
+ "content_transformation.#": "1",
+ "content_transformation.0.aws_lambda.#": "1",
+ "content_transformation.0.aws_lambda.0.function_payload": "",
+ }),
+ resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.transformation_configuration.*.actions.*", "GetObject"),
+ resource.TestCheckTypeSetElemAttrPair(resourceName, "configuration.0.transformation_configuration.*.content_transformation.0.aws_lambda.0.function_arn", lambdaFunctionResourceName, "arn"),
),
},
{
@@ -63,30 +80,6 @@ func TestAccS3ControlObjectLambdaAccessPoint_disappears(t *testing.T) {
})
}

-func TestAccS3ControlObjectLambdaAccessPoint_disappears_Bucket(t *testing.T) {
- var v s3control.ObjectLambdaConfiguration
- rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
- resourceName := "aws_s3control_object_lambda_access_point.test"
- bucketResourceName := "aws_s3_bucket.test"
-
- resource.ParallelTest(t, resource.TestCase{
- PreCheck: func() { acctest.PreCheck(t) },
- ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID),
- Providers: acctest.Providers,
- CheckDestroy: testAccCheckObjectLambdaAccessPointDestroy,
- Steps: []resource.TestStep{
- {
- Config: testAccObjectLambdaAccessPointConfig(rName),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckObjectLambdaAccessPointExists(resourceName, &v),
- testAccCheckDestroyBucket(bucketResourceName),
- ),
- ExpectNonEmptyPlan: true,
- },
- },
- })
-}
-
func testAccCheckObjectLambdaAccessPointDestroy(s *terraform.State) error {
conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn
diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go
index a865eb735d19..c10f319fdf65 100644
--- a/internal/service/s3control/sweep.go
+++ b/internal/service/s3control/sweep.go
@@ -19,6 +19,9 @@ func init() {
resource.AddTestSweepers("aws_s3_access_point", &resource.Sweeper{
Name: "aws_s3_access_point",
F: sweepAccessPoints,
+ Dependencies: []string{
+ "aws_s3control_object_lambda_access_point",
+ },
})

resource.AddTestSweepers("aws_s3control_multi_region_access_point", &resource.Sweeper{
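[Editor's note: the substantive change in the patch above is adding `Region` when composing the access point ARN, since `s3-object-lambda` access point ARNs are regional. A minimal standalone sketch of the same construction with the AWS SDK's `arn` package; the partition, region, and account values are placeholder assumptions, not values from the provider.]

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Placeholder values for illustration; in the provider these come from
	// the configured AWS client (Partition, Region, AccountID).
	a := arn.ARN{
		Partition: "aws",
		Service:   "s3-object-lambda",
		Region:    "us-west-2",
		AccountID: "123456789012",
		Resource:  "accesspoint/example",
	}

	// Before the fix, Region was left unset, so the rendered ARN had an
	// empty region component and could not be referenced in IAM policies.
	fmt.Println(a.String())
	// arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/example
}
```
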
"configuration.0.supporting_access_point", accessPointResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.transformation_configuration.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "configuration.0.transformation_configuration.*", map[string]string{ + "actions.#": "1", + "content_transformation.#": "1", + "content_transformation.0.aws_lambda.#": "1", + "content_transformation.0.aws_lambda.0.function_payload": "{\"res-x\": \"100\",\"res-y\": \"100\"}", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.transformation_configuration.*.actions.*", "GetObject"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configuration.0.transformation_configuration.*.content_transformation.0.aws_lambda.0.function_arn", lambdaFunctionResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccObjectLambdaAccessPointConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectLambdaAccessPointExists(resourceName, &v), + acctest.CheckResourceAttrAccountID(resourceName, "account_id"), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "s3-object-lambda", fmt.Sprintf("accesspoint/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.allowed_features.#", "0"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.cloud_watch_metrics_enabled", "false"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.supporting_access_point", accessPointResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.transformation_configuration.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "configuration.0.transformation_configuration.*", map[string]string{ + "actions.#": "1", + "content_transformation.#": "1", + "content_transformation.0.aws_lambda.#": "1", + "content_transformation.0.aws_lambda.0.function_payload": "", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.transformation_configuration.*.actions.*", "GetObject"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "configuration.0.transformation_configuration.*.content_transformation.0.aws_lambda.0.function_arn", lambdaFunctionResourceName, "arn"), + ), + }, + }, + }) +} + func testAccCheckObjectLambdaAccessPointDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn @@ -183,3 +249,37 @@ resource "aws_s3control_object_lambda_access_point" "test" { } `, rName)) } + +func testAccObjectLambdaAccessPointOptionalsConfig(rName string) string { + return acctest.ConfigCompose(testAccObjectLambdaAccessPointBaseConfig(rName), fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_access_point" "test" { + bucket = aws_s3_bucket.test.id + name = %[1]q +} + +resource "aws_s3control_object_lambda_access_point" "test" { + name = %[1]q + + configuration { + allowed_features = ["GetObject-Range", "GetObject-PartNumber"] + cloud_watch_metrics_enabled = true + supporting_access_point = aws_s3_access_point.test.arn + + transformation_configuration { + actions = ["GetObject"] + + content_transformation { + aws_lambda { + function_arn = aws_lambda_function.test.arn + function_payload = "{\"res-x\": \"100\",\"res-y\": \"100\"}" + } + } + } + } +} +`, rName)) +} From b0ca9fbb3730265ec565c1434b0fe750a2f898ff Mon Sep 17 00:00:00 2001 From: 
Kit Ewbank Date: Mon, 15 Nov 2021 13:14:39 -0500 Subject: [PATCH 184/304] r/aws_s3control_object_lambda_access_point_policy: New resource. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlObjectLambdaAccessPointPolicy_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlObjectLambdaAccessPointPolicy_basic -timeout 180m === RUN TestAccS3ControlObjectLambdaAccessPointPolicy_basic === PAUSE TestAccS3ControlObjectLambdaAccessPointPolicy_basic === CONT TestAccS3ControlObjectLambdaAccessPointPolicy_basic --- PASS: TestAccS3ControlObjectLambdaAccessPointPolicy_basic (45.52s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 49.227s --- .changelog/19294.txt | 4 + internal/provider/provider.go | 17 +- internal/service/s3control/find.go | 54 ++++++ .../object_lambda_access_point_policy.go | 159 ++++++++++++++++++ .../object_lambda_access_point_policy_test.go | 152 +++++++++++++++++ .../object_lambda_access_point_test.go | 2 +- 6 files changed, 379 insertions(+), 9 deletions(-) create mode 100644 internal/service/s3control/object_lambda_access_point_policy.go create mode 100644 internal/service/s3control/object_lambda_access_point_policy_test.go diff --git a/.changelog/19294.txt b/.changelog/19294.txt index a8d4357104e0..bdcc62bea539 100644 --- a/.changelog/19294.txt +++ b/.changelog/19294.txt @@ -1,3 +1,7 @@ ```release-note:new-resource aws_s3control_object_lambda_access_point +``` + +```release-note:new-resource +aws_s3control_object_lambda_access_point_policy ``` \ No newline at end of file diff --git a/internal/provider/provider.go b/internal/provider/provider.go index f62305f382e1..f6d11c7bee6c 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1478,14 +1478,15 @@ func Provider() *schema.Provider { "aws_s3_bucket_public_access_block": s3.ResourceBucketPublicAccessBlock(), "aws_s3_object_copy": s3.ResourceObjectCopy(), - "aws_s3_access_point": s3control.ResourceAccessPoint(), - "aws_s3_account_public_access_block": s3control.ResourceAccountPublicAccessBlock(), - "aws_s3control_bucket": s3control.ResourceBucket(), - "aws_s3control_bucket_lifecycle_configuration": s3control.ResourceBucketLifecycleConfiguration(), - "aws_s3control_bucket_policy": s3control.ResourceBucketPolicy(), - "aws_s3control_multi_region_access_point": s3control.ResourceMultiRegionAccessPoint(), - "aws_s3control_multi_region_access_point_policy": s3control.ResourceMultiRegionAccessPointPolicy(), - "aws_s3control_object_lambda_access_point": s3control.ResourceObjectLambdaAccessPoint(), + "aws_s3_access_point": s3control.ResourceAccessPoint(), + "aws_s3_account_public_access_block": s3control.ResourceAccountPublicAccessBlock(), + "aws_s3control_bucket": s3control.ResourceBucket(), + "aws_s3control_bucket_lifecycle_configuration": s3control.ResourceBucketLifecycleConfiguration(), + "aws_s3control_bucket_policy": s3control.ResourceBucketPolicy(), + "aws_s3control_multi_region_access_point": s3control.ResourceMultiRegionAccessPoint(), + "aws_s3control_multi_region_access_point_policy": s3control.ResourceMultiRegionAccessPointPolicy(), + "aws_s3control_object_lambda_access_point": s3control.ResourceObjectLambdaAccessPoint(), + "aws_s3control_object_lambda_access_point_policy": s3control.ResourceObjectLambdaAccessPointPolicy(), "aws_s3outposts_endpoint": s3outposts.ResourceEndpoint(), diff --git 
a/internal/service/s3control/find.go b/internal/service/s3control/find.go index 67f987d39286..052d6b217c9f 100644 --- a/internal/service/s3control/find.go +++ b/internal/service/s3control/find.go @@ -129,3 +129,57 @@ func FindObjectLambdaAccessPointByAccountIDAndName(conn *s3control.S3Control, ac return output.Configuration, nil } + +func FindObjectLambdaAccessPointPolicyAndStatusByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (string, *s3control.PolicyStatus, error) { + input1 := &s3control.GetAccessPointPolicyForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + } + + output1, err := conn.GetAccessPointPolicyForObjectLambda(input1) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { + return "", nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input1, + } + } + + if err != nil { + return "", nil, err + } + + if output1 == nil { + return "", nil, tfresource.NewEmptyResultError(input1) + } + + policy := aws.StringValue(output1.Policy) + + if policy == "" { + return "", nil, tfresource.NewEmptyResultError(input1) + } + + input2 := &s3control.GetAccessPointPolicyStatusForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + } + + output2, err := conn.GetAccessPointPolicyStatusForObjectLambda(input2) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { + return "", nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input2, + } + } + + if err != nil { + return "", nil, err + } + + if output2 == nil || output2.PolicyStatus == nil { + return "", nil, tfresource.NewEmptyResultError(input2) + } + + return policy, output2.PolicyStatus, nil +} diff --git a/internal/service/s3control/object_lambda_access_point_policy.go b/internal/service/s3control/object_lambda_access_point_policy.go new file mode 100644 index 000000000000..6df3d82cd0b1 --- /dev/null +++ b/internal/service/s3control/object_lambda_access_point_policy.go @@ -0,0 +1,159 @@ +package s3control + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func ResourceObjectLambdaAccessPointPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceObjectLambdaAccessPointPolicyCreate, + Read: resourceObjectLambdaAccessPointPolicyRead, + Update: resourceObjectLambdaAccessPointPolicyUpdate, + Delete: resourceObjectLambdaAccessPointPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidAccountID, + }, + "has_public_access_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, + }, + }, + } +} + +func resourceObjectLambdaAccessPointPolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn + + accountID := meta.(*conns.AWSClient).AccountID + if v, ok := d.GetOk("account_id"); ok { + 
accountID = v.(string) + } + name := d.Get("name").(string) + resourceID := ObjectLambdaAccessPointCreateResourceID(accountID, name) + + input := &s3control.PutAccessPointPolicyForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + Policy: aws.String(d.Get("policy").(string)), + } + + log.Printf("[DEBUG] Creating S3 Object Lambda Access Point Policy: %s", input) + _, err := conn.PutAccessPointPolicyForObjectLambda(input) + + if err != nil { + return fmt.Errorf("error creating S3 Object Lambda Access Point (%s) Policy: %w", resourceID, err) + } + + d.SetId(resourceID) + + return resourceObjectLambdaAccessPointPolicyRead(d, meta) +} + +func resourceObjectLambdaAccessPointPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn + + accountID, name, err := ObjectLambdaAccessPointParseResourceID(d.Id()) + + if err != nil { + return err + } + + policy, status, err := FindObjectLambdaAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] S3 Object Lambda Access Point Policy (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading S3 Object Lambda Access Point Policy (%s): %w", d.Id(), err) + } + + d.Set("account_id", accountID) + d.Set("has_public_access_policy", status.IsPublic) + d.Set("name", name) + d.Set("policy", policy) + + return nil +} + +func resourceObjectLambdaAccessPointPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn + + accountID, name, err := ObjectLambdaAccessPointParseResourceID(d.Id()) + + if err != nil { + return err + } + + input := &s3control.PutAccessPointPolicyForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + Policy: aws.String(d.Get("policy").(string)), + } + + log.Printf("[DEBUG] Updating S3 Object Lambda Access Point Policy: %s", input) + _, err = conn.PutAccessPointPolicyForObjectLambda(input) + + if err != nil { + return fmt.Errorf("error updating S3 Object Lambda Access Point Policy (%s): %w", d.Id(), err) + } + + return resourceObjectLambdaAccessPointPolicyRead(d, meta) +} + +func resourceObjectLambdaAccessPointPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3ControlConn + + accountID, name, err := ObjectLambdaAccessPointParseResourceID(d.Id()) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting S3 Object Lambda Access Point Policy: %s", d.Id()) + _, err = conn.DeleteAccessPointPolicyForObjectLambda(&s3control.DeleteAccessPointPolicyForObjectLambdaInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + }) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting S3 Object Lambda Access Point Policy (%s): %w", d.Id(), err) + } + + return nil +} diff --git a/internal/service/s3control/object_lambda_access_point_policy_test.go b/internal/service/s3control/object_lambda_access_point_policy_test.go new file mode 100644 index 000000000000..d2cf076e6f44 --- /dev/null +++ b/internal/service/s3control/object_lambda_access_point_policy_test.go @@ -0,0 +1,152 @@ +package s3control_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/s3control" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func TestAccS3ControlObjectLambdaAccessPointPolicy_basic(t *testing.T) { + resourceName := "aws_s3control_object_lambda_access_point_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckObjectLambdaAccessPointPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccObjectLambdaAccessPointPolicyConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectLambdaAccessPointPolicyExists(resourceName), + acctest.CheckResourceAttrAccountID(resourceName, "account_id"), + resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrSet(resourceName, "policy"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckObjectLambdaAccessPointPolicyDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3control_object_lambda_access_point_policy" { + continue + } + + accountID, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID) + + if err != nil { + return err + } + + _, _, err = tfs3control.FindObjectLambdaAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("S3 Object Lambda Access Point Policy %s still exists", rs.Primary.ID) + } + + return nil +} + +func testAccCheckObjectLambdaAccessPointPolicyExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No S3 Object Lambda Access Point Policy ID is set") + } + + accountID, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID) + + if err != nil { + return err + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + + _, _, err = tfs3control.FindObjectLambdaAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name) + + if err != nil { + return err + } + + return nil + } +} + +func testAccObjectLambdaAccessPointPolicyConfig(rName string) string { + return acctest.ConfigCompose(testAccObjectLambdaAccessPointBaseConfig(rName), fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_access_point" "test" { + bucket = aws_s3_bucket.test.id + name = %[1]q +} + +resource "aws_s3control_object_lambda_access_point" "test" { + name = %[1]q + + configuration { + supporting_access_point = aws_s3_access_point.test.arn + + transformation_configuration { + actions = ["GetObject"] + + content_transformation { + aws_lambda { + function_arn = 
aws_lambda_function.test.arn + } + } + } + } +} + +resource "aws_s3control_object_lambda_access_point_policy" "test" { + name = aws_s3control_object_lambda_access_point.test.name + + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Action = "s3-object-lambda:GetObject" + Principal = { + AWS = data.aws_caller_identity.current.account_id + } + Resource = aws_s3control_object_lambda_access_point.test.arn + }] + }) +} +`, rName)) +} diff --git a/internal/service/s3control/object_lambda_access_point_test.go b/internal/service/s3control/object_lambda_access_point_test.go index 1ac26e7b4f4b..bcb06681d4a6 100644 --- a/internal/service/s3control/object_lambda_access_point_test.go +++ b/internal/service/s3control/object_lambda_access_point_test.go @@ -184,7 +184,7 @@ func testAccCheckObjectLambdaAccessPointExists(n string, v *s3control.ObjectLamb } if rs.Primary.ID == "" { - return fmt.Errorf("No S3 Object Lambda Access Point is set") + return fmt.Errorf("No S3 Object Lambda Access Point ID is set") } accountID, name, err := tfs3control.ObjectLambdaAccessPointParseResourceID(rs.Primary.ID) From 80973618ad9d812f088524a953e711079dbb8369 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 15 Nov 2021 13:22:40 -0500 Subject: [PATCH 185/304] r/aws_s3control_object_lambda_access_point_policy: Add documentation. --- ...t_lambda_access_point_policy.html.markdown | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 website/docs/r/s3control_object_lambda_access_point_policy.html.markdown diff --git a/website/docs/r/s3control_object_lambda_access_point_policy.html.markdown b/website/docs/r/s3control_object_lambda_access_point_policy.html.markdown new file mode 100644 index 000000000000..ba8e5214ba90 --- /dev/null +++ b/website/docs/r/s3control_object_lambda_access_point_policy.html.markdown @@ -0,0 +1,81 @@ +--- +subcategory: "S3 Control" +layout: "aws" +page_title: "AWS: aws_s3control_object_lambda_access_point_policy" +description: |- + Provides a resource to manage an S3 Object Lambda Access Point resource policy. +--- + +# Resource: aws_s3control_object_lambda_access_point_policy + +Provides a resource to manage an S3 Object Lambda Access Point resource policy. + +## Example Usage + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "example" +} + +resource "aws_s3_access_point" "example" { + bucket = aws_s3_bucket.example.id + name = "example" +} + +resource "aws_s3control_object_lambda_access_point" "example" { + name = "example" + + configuration { + supporting_access_point = aws_s3_access_point.example.arn + + transformation_configuration { + actions = ["GetObject"] + + content_transformation { + aws_lambda { + function_arn = aws_lambda_function.example.arn + } + } + } + } +} + +resource "aws_s3control_object_lambda_access_point_policy" "example" { + name = aws_s3control_object_lambda_access_point.example.name + + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Action = "s3-object-lambda:GetObject" + Principal = { + AWS = data.aws_caller_identity.current.account_id + } + Resource = aws_s3control_object_lambda_access_point.example.arn + }] + }) +} +``` + +## Argument Reference + +The following arguments are supported: + +* `account_id` - (Optional) The AWS account ID for the account that owns the Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. +* `name` - (Required) The name of the Object Lambda Access Point. 
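[Editor's note: the new policy resource reuses the access point's `ObjectLambdaAccessPointCreateResourceID`/`ObjectLambdaAccessPointParseResourceID` helpers, which are not shown in this patch series. Below is a plausible sketch of what they look like, assuming the `account_id:name` composite ID format the documentation describes; the separator constant name is an assumption.]

```go
package s3control

import (
	"fmt"
	"strings"
)

// Assumed separator for the composite resource ID; the documented import
// format is "account_id:name".
const objectLambdaAccessPointResourceIDSeparator = ":"

// ObjectLambdaAccessPointCreateResourceID joins the AWS account ID and the
// access point name into the ID stored in Terraform state.
func ObjectLambdaAccessPointCreateResourceID(accountID, name string) string {
	return strings.Join([]string{accountID, name}, objectLambdaAccessPointResourceIDSeparator)
}

// ObjectLambdaAccessPointParseResourceID splits a composite ID back into its
// account ID and access point name parts, rejecting malformed IDs.
func ObjectLambdaAccessPointParseResourceID(id string) (string, string, error) {
	parts := strings.Split(id, objectLambdaAccessPointResourceIDSeparator)

	if len(parts) == 2 && parts[0] != "" && parts[1] != "" {
		return parts[0], parts[1], nil
	}

	return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected account-id%[2]sname", id, objectLambdaAccessPointResourceIDSeparator)
}
```
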
From 80973618ad9d812f088524a953e711079dbb8369 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 13:22:40 -0500
Subject: [PATCH 185/304] r/aws_s3control_object_lambda_access_point_policy: Add documentation.

---
 ...t_lambda_access_point_policy.html.markdown | 81 +++++++++++++++++++
 1 file changed, 81 insertions(+)
 create mode 100644 website/docs/r/s3control_object_lambda_access_point_policy.html.markdown

diff --git a/website/docs/r/s3control_object_lambda_access_point_policy.html.markdown b/website/docs/r/s3control_object_lambda_access_point_policy.html.markdown
new file mode 100644
index 000000000000..ba8e5214ba90
--- /dev/null
+++ b/website/docs/r/s3control_object_lambda_access_point_policy.html.markdown
@@ -0,0 +1,81 @@
+---
+subcategory: "S3 Control"
+layout: "aws"
+page_title: "AWS: aws_s3control_object_lambda_access_point_policy"
+description: |-
+  Provides a resource to manage an S3 Object Lambda Access Point resource policy.
+---
+
+# Resource: aws_s3control_object_lambda_access_point_policy
+
+Provides a resource to manage an S3 Object Lambda Access Point resource policy.
+
+## Example Usage
+
+```terraform
+resource "aws_s3_bucket" "example" {
+ bucket = "example"
+}
+
+resource "aws_s3_access_point" "example" {
+ bucket = aws_s3_bucket.example.id
+ name = "example"
+}
+
+resource "aws_s3control_object_lambda_access_point" "example" {
+ name = "example"
+
+ configuration {
+ supporting_access_point = aws_s3_access_point.example.arn
+
+ transformation_configuration {
+ actions = ["GetObject"]
+
+ content_transformation {
+ aws_lambda {
+ function_arn = aws_lambda_function.example.arn
+ }
+ }
+ }
+ }
+}
+
+resource "aws_s3control_object_lambda_access_point_policy" "example" {
+ name = aws_s3control_object_lambda_access_point.example.name
+
+ policy = jsonencode({
+ Version = "2008-10-17"
+ Statement = [{
+ Effect = "Allow"
+ Action = "s3-object-lambda:GetObject"
+ Principal = {
+ AWS = data.aws_caller_identity.current.account_id
+ }
+ Resource = aws_s3control_object_lambda_access_point.example.arn
+ }]
+ })
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `account_id` - (Optional) The AWS account ID for the account that owns the Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider.
+* `name` - (Required) The name of the Object Lambda Access Point.
+* `policy` - (Required) The Object Lambda Access Point resource policy document.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `has_public_access_policy` - Indicates whether this access point currently has a policy that allows public access.
+* `id` - The AWS account ID and access point name separated by a colon (`:`).
+
+## Import
+
+Object Lambda Access Point policies can be imported using the `account_id` and `name`, separated by a colon (`:`), e.g.
+
+```
+$ terraform import aws_s3control_object_lambda_access_point_policy.example 123456789012:example
+```

From c292618734a9585ba64798e596bd68104c025566 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 13:40:17 -0500
Subject: [PATCH 186/304] Fix terrafmt errors.

---
 .../securityhub/finding_aggregator_test.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/internal/service/securityhub/finding_aggregator_test.go b/internal/service/securityhub/finding_aggregator_test.go
index a06ef5cec560..15eefa62d34e 100644
--- a/internal/service/securityhub/finding_aggregator_test.go
+++ b/internal/service/securityhub/finding_aggregator_test.go
@@ -133,9 +133,9 @@ func testAccFindingAggregatorAllRegionsConfig() string {
resource "aws_securityhub_account" "example" {}

resource "aws_securityhub_finding_aggregator" "test_aggregator" {
- linking_mode = "ALL_REGIONS"
+ linking_mode = "ALL_REGIONS"

- depends_on = [aws_securityhub_account.example]
+ depends_on = [aws_securityhub_account.example]
}
`
}
@@ -145,10 +145,10 @@ func testAccFindingAggregatorSpecifiedRegionsConfig() string {
resource "aws_securityhub_account" "example" {}

resource "aws_securityhub_finding_aggregator" "test_aggregator" {
- linking_mode = "SPECIFIED_REGIONS"
- specified_regions = ["%s", "%s", "%s"]
+ linking_mode = "SPECIFIED_REGIONS"
+ specified_regions = ["%s", "%s", "%s"]

- depends_on = [aws_securityhub_account.example]
+ depends_on = [aws_securityhub_account.example]
}
`, endpoints.EuWest1RegionID, endpoints.EuWest2RegionID, endpoints.UsEast1RegionID)
}
@@ -158,10 +158,10 @@ func testAccFindingAggregatorAllRegionsExceptSpecifiedConfig() string {
resource "aws_securityhub_account" "example" {}

resource "aws_securityhub_finding_aggregator" "test_aggregator" {
- linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED"
- specified_regions = ["%s", "%s"]
+ linking_mode = "ALL_REGIONS_EXCEPT_SPECIFIED"
+ specified_regions = ["%s", "%s"]

- depends_on = [aws_securityhub_account.example]
+ depends_on = [aws_securityhub_account.example]
}
`, endpoints.EuWest1RegionID, endpoints.EuWest2RegionID)
}

From 58a1e4f706a8f0a7df3b0d91de00d8de12d5ae03 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 13:42:56 -0500
Subject: [PATCH 187/304] Fix tfproviderdocs errors.

---
 website/docs/r/securityhub_finding_aggregator.markdown | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/website/docs/r/securityhub_finding_aggregator.markdown b/website/docs/r/securityhub_finding_aggregator.markdown
index 503797664bbd..51c53ded0af9 100644
--- a/website/docs/r/securityhub_finding_aggregator.markdown
+++ b/website/docs/r/securityhub_finding_aggregator.markdown
@@ -10,7 +10,9 @@ description: |-

Manages a Security Hub finding aggregator. Security Hub needs to be enabled in a region in order for the aggregator to pull through findings.

-## All Regions Usage
+## Example Usage
+
+### All Regions Usage

The following example will enable the aggregator for every region.

@@ -24,7 +26,7 @@ resource "aws_securityhub_finding_aggregator" "example" {
}
```

-## All Regions Except Specified Regions Usage
+### All Regions Except Specified Regions Usage

The following example will enable the aggregator for every region except those specified in `specified_regions`.

@@ -39,7 +41,7 @@ resource "aws_securityhub_finding_aggregator" "example" {
}
```

-## Specified Regions Usage
+### Specified Regions Usage

The following example will enable the aggregator for every region specified in `specified_regions`.

From 97e5f2bf2783a8f53cfed58d11ba9043c47a2dd0 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 13:44:17 -0500
Subject: [PATCH 188/304] r/aws_s3control_object_lambda_access_point_policy: Passing acceptance tests.

Acceptance test output:

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlObjectLambdaAccessPointPolicy_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlObjectLambdaAccessPointPolicy_ -timeout 180m
=== RUN TestAccS3ControlObjectLambdaAccessPointPolicy_basic
=== PAUSE TestAccS3ControlObjectLambdaAccessPointPolicy_basic
=== RUN TestAccS3ControlObjectLambdaAccessPointPolicy_disappears
=== PAUSE TestAccS3ControlObjectLambdaAccessPointPolicy_disappears
=== RUN TestAccS3ControlObjectLambdaAccessPointPolicy_disappears_AccessPoint
=== PAUSE TestAccS3ControlObjectLambdaAccessPointPolicy_disappears_AccessPoint
=== RUN TestAccS3ControlObjectLambdaAccessPointPolicy_update
=== PAUSE TestAccS3ControlObjectLambdaAccessPointPolicy_update
=== CONT TestAccS3ControlObjectLambdaAccessPointPolicy_basic
=== CONT TestAccS3ControlObjectLambdaAccessPointPolicy_disappears_AccessPoint
=== CONT TestAccS3ControlObjectLambdaAccessPointPolicy_disappears
=== CONT TestAccS3ControlObjectLambdaAccessPointPolicy_update
--- PASS: TestAccS3ControlObjectLambdaAccessPointPolicy_disappears_AccessPoint (51.24s)
--- PASS: TestAccS3ControlObjectLambdaAccessPointPolicy_basic (56.99s)
--- PASS: TestAccS3ControlObjectLambdaAccessPointPolicy_disappears (69.55s)
--- PASS: TestAccS3ControlObjectLambdaAccessPointPolicy_update (94.41s)
PASS
ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 102.627s

---
 internal/service/s3control/find.go | 4 +-
 .../object_lambda_access_point_policy.go | 2 +-
 .../object_lambda_access_point_policy_test.go | 133 ++++++++++++++++++
 3 files changed, 136 insertions(+), 3 deletions(-)

diff --git a/internal/service/s3control/find.go b/internal/service/s3control/find.go
index 052d6b217c9f..00cb3e66f36a 100644
--- a/internal/service/s3control/find.go
+++ b/internal/service/s3control/find.go
@@ -138,7 +138,7 @@ func FindObjectLambdaAccessPointPolicyAndStatusByAccountIDAndName(conn *s3contro

output1, err := conn.GetAccessPointPolicyForObjectLambda(input1)

- if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) {
+ if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) {
return "", nil, &resource.NotFoundError{
LastError: err,
LastRequest: input1,
@@ -166,7 +166,7 @@ func FindObjectLambdaAccessPointPolicyAndStatusByAccountIDAndName(conn *s3contro

output2, err := conn.GetAccessPointPolicyStatusForObjectLambda(input2)

- if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) {
+ if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) {
return "", nil, &resource.NotFoundError{
LastError: err,
LastRequest: input2,
diff --git a/internal/service/s3control/object_lambda_access_point_policy.go b/internal/service/s3control/object_lambda_access_point_policy.go
index 6df3d82cd0b1..b0f677e3d333 100644
--- a/internal/service/s3control/object_lambda_access_point_policy.go
+++ b/internal/service/s3control/object_lambda_access_point_policy.go
@@ -147,7 +147,7 @@ func resourceObjectLambdaAccessPointPolicyDelete(d *schema.ResourceData, meta in
Name: aws.String(name),
})

- if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) {
+ if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) {
return nil
}

diff --git a/internal/service/s3control/object_lambda_access_point_policy_test.go b/internal/service/s3control/object_lambda_access_point_policy_test.go
index d2cf076e6f44..b888648efda4 100644
--- a/internal/service/s3control/object_lambda_access_point_policy_test.go
+++ b/internal/service/s3control/object_lambda_access_point_policy_test.go
@@ -43,6 +43,90 @@ func TestAccS3ControlObjectLambdaAccessPointPolicy_basic(t *testing.T) {
})
}

+func TestAccS3ControlObjectLambdaAccessPointPolicy_disappears(t *testing.T) {
+ resourceName := "aws_s3control_object_lambda_access_point_policy.test"
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID),
+ Providers: acctest.Providers,
+ CheckDestroy: testAccCheckObjectLambdaAccessPointPolicyDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccObjectLambdaAccessPointPolicyConfig(rName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckObjectLambdaAccessPointPolicyExists(resourceName),
+ acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceObjectLambdaAccessPointPolicy(), resourceName),
+ ),
+ ExpectNonEmptyPlan: true,
+ },
+ },
+ })
+}
+
+func TestAccS3ControlObjectLambdaAccessPointPolicy_disappears_AccessPoint(t *testing.T) {
+ resourceName := "aws_s3control_object_lambda_access_point_policy.test"
+ accessPointResourceName := "aws_s3control_object_lambda_access_point_policy.test"
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID),
+ Providers: acctest.Providers,
+ CheckDestroy: testAccCheckObjectLambdaAccessPointPolicyDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccObjectLambdaAccessPointPolicyConfig(rName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckObjectLambdaAccessPointPolicyExists(resourceName),
+ acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceObjectLambdaAccessPoint(), accessPointResourceName),
+ ),
+ ExpectNonEmptyPlan: true,
+ },
+ },
+ })
+}
+
+func TestAccS3ControlObjectLambdaAccessPointPolicy_update(t *testing.T) {
+ resourceName := "aws_s3control_object_lambda_access_point_policy.test"
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ ErrorCheck: acctest.ErrorCheck(t, s3control.EndpointsID),
+ Providers: acctest.Providers,
+ CheckDestroy: testAccCheckObjectLambdaAccessPointPolicyDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccObjectLambdaAccessPointPolicyConfig(rName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckObjectLambdaAccessPointPolicyExists(resourceName),
+ acctest.CheckResourceAttrAccountID(resourceName, "account_id"),
+ resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"),
+ resource.TestCheckResourceAttr(resourceName, "name", rName),
+ resource.TestCheckResourceAttrSet(resourceName, "policy"),
+ ),
+ },
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccObjectLambdaAccessPointPolicyUpdatedConfig(rName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckObjectLambdaAccessPointPolicyExists(resourceName),
+ acctest.CheckResourceAttrAccountID(resourceName, "account_id"),
+ resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"),
+ resource.TestCheckResourceAttr(resourceName, "name", rName),
+ resource.TestCheckResourceAttrSet(resourceName, "policy"),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckObjectLambdaAccessPointPolicyDestroy(s *terraform.State) error {
conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn
@@ -150,3 +234,52 @@ resource "aws_s3control_object_lambda_access_point_policy" "test" {
}
`, rName))
}
+
+func testAccObjectLambdaAccessPointPolicyUpdatedConfig(rName string) string {
+ return acctest.ConfigCompose(testAccObjectLambdaAccessPointBaseConfig(rName), fmt.Sprintf(`
+data "aws_caller_identity" "current" {}
+
+resource "aws_s3_bucket" "test" {
+ bucket = %[1]q
+}
+
+resource "aws_s3_access_point" "test" {
+ bucket = aws_s3_bucket.test.id
+ name = %[1]q
+}
+
+resource "aws_s3control_object_lambda_access_point" "test" {
+ name = %[1]q
+
+ configuration {
+ supporting_access_point = aws_s3_access_point.test.arn
+
+ transformation_configuration {
+ actions = ["GetObject"]
+
+ content_transformation {
+ aws_lambda {
+ function_arn = aws_lambda_function.test.arn
+ }
+ }
+ }
+ }
+}
+
+resource "aws_s3control_object_lambda_access_point_policy" "test" {
+ name = aws_s3control_object_lambda_access_point.test.name
+
+ policy = jsonencode({
+ Version = "2008-10-17"
+ Statement = [{
+ Effect = "Allow"
+ Action = "s3-object-lambda:*"
+ Principal = {
+ AWS = data.aws_caller_identity.current.account_id
+ }
+ Resource = aws_s3control_object_lambda_access_point.test.arn
+ }]
+ })
+}
+`, rName))
+}

From fadac94ce864051f4f97dc39a092cd3cb123bd32 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 15 Nov 2021 11:08:09 -0800
Subject: [PATCH 189/304] Fixes error where empty `checkpoint_percentages` is submitted to the API

---
 internal/service/autoscaling/group.go | 3 +-
 internal/service/autoscaling/group_test.go | 77 ++++++++++++++++++++++
 2 files changed, 78 insertions(+), 2 deletions(-)

diff --git a/internal/service/autoscaling/group.go b/internal/service/autoscaling/group.go
index b83e3e2c42ef..9602e9417821 100644
--- a/internal/service/autoscaling/group.go
+++ b/internal/service/autoscaling/group.go
@@ -2217,8 +2217,7 @@ func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscal
}
}

- if v, ok := m["checkpoint_percentages"]; ok {
- l := v.([]interface{})
+ if l, ok := m["checkpoint_percentages"].([]interface{}); ok && len(l) > 0 {
p := make([]*int64, len(l))
for i, v := range l {
p[i] = aws.Int64(int64(v.(int)))
diff --git a/internal/service/autoscaling/group_test.go b/internal/service/autoscaling/group_test.go
index 9930009401de..9a73980219a0 100644
--- a/internal/service/autoscaling/group_test.go
+++ b/internal/service/autoscaling/group_test.go
@@ -973,6 +973,19 @@ func TestAccAutoScalingGroup_InstanceRefresh_basic(t *testing.T) {
"instance_refresh",
},
},
+ {
+ Config: testAccGroupConfig_InstanceRefresh_MinHealthyPercentage(),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckGroupExists(resourceName, &group),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.#", "1"),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.#", "1"),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.0.instance_warmup", ""),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.0.min_healthy_percentage", "0"),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.0.checkpoint_delay", ""),
+ resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.0.checkpoint_percentages.#", "0"),
+ ),
+ },
{
Config: testAccGroupConfig_InstanceRefresh_Full(),
Check: resource.ComposeTestCheckFunc(
@@ -4474,6 +4487,49 @@ resource "aws_launch_configuration" "test" {
`
}

+func testAccGroupConfig_InstanceRefresh_MinHealthyPercentage() string {
+ return `
+resource "aws_autoscaling_group" "test" {
+ availability_zones = [data.aws_availability_zones.current.names[0]]
+ max_size = 2
+ min_size = 1
+ desired_capacity = 1
+ launch_configuration = aws_launch_configuration.test.name
+
+ instance_refresh {
+ strategy = "Rolling"
+ preferences {
+ min_healthy_percentage = 0
+ }
+ }
+}
+
+data "aws_ami" "test" {
+ most_recent = true
+ owners = ["amazon"]
+
+ filter {
+ name = "name"
+ values = ["amzn-ami-hvm-*-x86_64-gp2"]
+ }
+}
+
+data "aws_availability_zones" "current" {
+ state = "available"
+
+ filter {
+ name = "opt-in-status"
+ values = ["opt-in-not-required"]
+ }
+}
+
+resource "aws_launch_configuration" "test" {
+ image_id = data.aws_ami.test.id
+ instance_type = "t3.nano"
+}
+`
+}
+
func testAccGroupConfig_InstanceRefresh_Full() string {
return `
resource "aws_autoscaling_group" "test" {
@@ -4968,6 +5024,27 @@ func TestCreateAutoScalingGroupInstanceRefreshInput(t *testing.T) {
},
},
},
+ {
+ name: "checkpoint_percentages empty",
+ input: []interface{}{map[string]interface{}{
+ "strategy": "Rolling",
+ "preferences": []interface{}{
+ map[string]interface{}{
+ "checkpoint_percentages": []interface{}{},
+ },
+ },
+ }},
+ expected: &autoscaling.StartInstanceRefreshInput{
+ AutoScalingGroupName: aws.String(asgName),
+ Strategy: aws.String("Rolling"),
+ Preferences: &autoscaling.RefreshPreferences{
+ CheckpointDelay: nil,
+ CheckpointPercentages: nil,
+ InstanceWarmup: nil,
+ MinHealthyPercentage: nil,
+ },
+ },
+ },
{
name: "checkpoint_percentages",
input: []interface{}{map[string]interface{}{
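[Editor's note: the heart of the fix above is the combined type assertion and length guard. Asserting `.([]interface{})` alone also succeeds for an empty list, so without `len(l) > 0` the code built a zero-length slice and submitted it to the API. A self-contained illustration of the guard follows; the provider plumbing is omitted and the map literals are illustrative.]

```go
package main

import "fmt"

// expandPercentages mirrors the fixed logic: build the slice only when the
// configuration list is non-empty, otherwise leave it nil so the field is
// omitted from the API request entirely.
func expandPercentages(m map[string]interface{}) []*int64 {
	if l, ok := m["checkpoint_percentages"].([]interface{}); ok && len(l) > 0 {
		p := make([]*int64, len(l))
		for i, v := range l {
			n := int64(v.(int))
			p[i] = &n
		}
		return p
	}
	return nil
}

func main() {
	empty := expandPercentages(map[string]interface{}{"checkpoint_percentages": []interface{}{}})
	fmt.Println(empty == nil) // true: an empty list stays nil instead of becoming []

	full := expandPercentages(map[string]interface{}{"checkpoint_percentages": []interface{}{20, 100}})
	fmt.Println(len(full)) // 2
}
```
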
From 4bad1e9aee3d845eb955da2aa04cdcd8ae948228 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 14:39:58 -0500
Subject: [PATCH 190/304] Fix golangci-lint error: 'directive ... unused for linter "deadcode" (nolintlint)'.

---
 internal/service/s3control/errors.go | 1 -
 .../object_lambda_access_point_policy_test.go | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/internal/service/s3control/errors.go b/internal/service/s3control/errors.go
index adc8d4e5ec4e..c69def0cdea9 100644
--- a/internal/service/s3control/errors.go
+++ b/internal/service/s3control/errors.go
@@ -2,7 +2,6 @@ package s3control

// Error code constants missing from AWS Go SDK:
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3control/#pkg-constants
-//nolint:deadcode,varcheck // These constants are missing from the AWS SDK
const (
errCodeNoSuchAccessPoint = "NoSuchAccessPoint"
errCodeNoSuchAccessPointPolicy = "NoSuchAccessPointPolicy"
diff --git a/internal/service/s3control/object_lambda_access_point_policy_test.go b/internal/service/s3control/object_lambda_access_point_policy_test.go
index b888648efda4..d81e3bfd698e 100644
--- a/internal/service/s3control/object_lambda_access_point_policy_test.go
+++ b/internal/service/s3control/object_lambda_access_point_policy_test.go
@@ -67,7 +67,7 @@ func TestAccS3ControlObjectLambdaAccessPointPolicy_disappears(t *testing.T) {

func TestAccS3ControlObjectLambdaAccessPointPolicy_disappears_AccessPoint(t *testing.T) {
resourceName := "aws_s3control_object_lambda_access_point_policy.test"
- accessPointResourceName := "aws_s3control_object_lambda_access_point_policy.test"
+ accessPointResourceName := "aws_s3control_object_lambda_access_point.test"
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

resource.ParallelTest(t, resource.TestCase{

From 8259425764c9c3a01476c3b031b3eb4038471eea Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 15 Nov 2021 13:54:34 -0800
Subject: [PATCH 191/304] Adds tests for multiple organizational unit distinguished names

---
 .../appstream/directory_config_test.go | 146 +++++++++++++++---
 1 file changed, 128 insertions(+), 18 deletions(-)

diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go
index 6c9ddfaf14ee..39c3f2870795 100644
--- a/internal/service/appstream/directory_config_test.go
+++ b/internal/service/appstream/directory_config_test.go
@@ -2,6 +2,7 @@ package appstream_test

import (
"fmt"
+ "strings"
"testing"

"github.com/aws/aws-sdk-go/aws"
@@ -16,13 +17,14 @@ import (
)

func TestAccAppStreamDirectoryConfig_basic(t *testing.T) {
- var directoryOutput appstream.DirectoryConfig
+ var v1, v2 appstream.DirectoryConfig
resourceName := "aws_appstream_directory_config.test"
- rName := acctest.RandomDomainName()
- rUserName := fmt.Sprintf("%s\\%s", rName, sdkacctest.RandString(10))
+ domain := acctest.RandomDomainName()
+ rUserName := fmt.Sprintf("%s\\%s", domain, sdkacctest.RandString(10))
rPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
- rUserNameUpdated := fmt.Sprintf("%s\\%s", rName, sdkacctest.RandString(10))
+ rUserNameUpdated := fmt.Sprintf("%s\\%s", domain, sdkacctest.RandString(10))
rPasswordUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+ orgUnitDN := orgUnitFromDomain("Test", domain)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(t) },
@@ -31,23 +33,28 @@ func TestAccAppStreamDirectoryConfig_basic(t *testing.T) {
ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID),
Steps: []resource.TestStep{
{
- Config: testAccDirectoryConfigConfig(rName, rUserName, rPassword),
+ Config: testAccDirectoryConfigConfig(domain, rUserName, rPassword, orgUnitDN),
Check: resource.ComposeTestCheckFunc(
- testAccCheckDirectoryConfigExists(resourceName, &directoryOutput),
- resource.TestCheckResourceAttr(resourceName, "directory_name", rName),
+ testAccCheckDirectoryConfigExists(resourceName, &v1),
+ resource.TestCheckResourceAttr(resourceName, "directory_name", domain),
acctest.CheckResourceAttrRFC3339(resourceName, "created_time"),
resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"),
+ resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.0", orgUnitDN),
+ resource.TestCheckResourceAttr(resourceName, "service_account_credentials.#", "1"),
resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_name", rUserName),
resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_password", rPassword),
),
},
{
- Config: testAccDirectoryConfigConfig(rName, rUserNameUpdated, rPasswordUpdated),
+ Config: testAccDirectoryConfigConfig(domain, rUserNameUpdated, rPasswordUpdated, orgUnitDN),
Check: resource.ComposeTestCheckFunc(
- testAccCheckDirectoryConfigExists(resourceName, &directoryOutput),
- resource.TestCheckResourceAttr(resourceName, "directory_name", rName),
+ testAccCheckDirectoryConfigExists(resourceName, &v2),
+ testAccCheckDirectoryConfigNotRecreated(&v1, &v2),
+ resource.TestCheckResourceAttr(resourceName, "directory_name", domain),
acctest.CheckResourceAttrRFC3339(resourceName, "created_time"),
resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"),
+ resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.0", orgUnitDN),
+ resource.TestCheckResourceAttr(resourceName, "service_account_credentials.#", "1"),
resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_name", rUserNameUpdated),
resource.TestCheckResourceAttr(resourceName, "service_account_credentials.0.account_password", rPasswordUpdated),
),
@@ -63,11 +70,12 @@ func TestAccAppStreamDirectoryConfig_basic(t *testing.T) {
}

func TestAccAppStreamDirectoryConfig_disappears(t *testing.T) {
- var directoryOutput appstream.DirectoryConfig
+ var v appstream.DirectoryConfig
resourceName := "aws_appstream_directory_config.test"
- rName := acctest.RandomDomainName()
- rUserName := fmt.Sprintf("%s\\%s", rName, sdkacctest.RandString(10))
+ domain := acctest.RandomDomainName()
+ rUserName := fmt.Sprintf("%s\\%s", domain, sdkacctest.RandString(10))
rPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+ orgUnitDN := orgUnitFromDomain("Test", domain)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(t) },
@@ -76,9 +84,9 @@ func TestAccAppStreamDirectoryConfig_disappears(t *testing.T) {
ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID),
Steps: []resource.TestStep{
{
- Config: testAccDirectoryConfigConfig(rName, rUserName, rPassword),
+ Config: testAccDirectoryConfigConfig(domain, rUserName, rPassword, orgUnitDN),
Check: resource.ComposeTestCheckFunc(
- testAccCheckDirectoryConfigExists(resourceName, &directoryOutput),
+ testAccCheckDirectoryConfigExists(resourceName, &v),
acctest.CheckResourceDisappears(acctest.Provider, tfappstream.ResourceDirectoryConfig(), resourceName),
),
ExpectNonEmptyPlan: true,
@@ -87,6 +95,53 @@ func TestAccAppStreamDirectoryConfig_disappears(t *testing.T) {
})
}

+func TestAccAppStreamDirectoryConfig_OrganizationalUnitDistinguishedNames(t *testing.T) {
+ var v1, v2, v3 appstream.DirectoryConfig
+ resourceName := "aws_appstream_directory_config.test"
domain := acctest.RandomDomainName() + rUserName := fmt.Sprintf("%s\\%s", domain, sdkacctest.RandString(10)) + rPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + orgUnitDN1 := orgUnitFromDomain("One", domain) + orgUnitDN2 := orgUnitFromDomain("Two", domain) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckDirectoryConfigDestroy, + ErrorCheck: acctest.ErrorCheck(t, appstream.EndpointsID), + Steps: []resource.TestStep{ + { + Config: testAccDirectoryConfigConfig(domain, rUserName, rPassword, orgUnitDN1), + Check: resource.ComposeTestCheckFunc( + testAccCheckDirectoryConfigExists(resourceName, &v1), + resource.TestCheckResourceAttr(resourceName, "directory_name", domain), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.0", orgUnitDN1), + ), + }, + { + Config: testAccDirectoryConfig_OrganizationalUnitDistinguishedNamesConfig(domain, rUserName, rPassword, orgUnitDN1, orgUnitDN2), + Check: resource.ComposeTestCheckFunc( + testAccCheckDirectoryConfigExists(resourceName, &v2), + resource.TestCheckResourceAttr(resourceName, "directory_name", domain), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "2"), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.0", orgUnitDN1), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.1", orgUnitDN2), + ), + }, + { + Config: testAccDirectoryConfigConfig(domain, rUserName, rPassword, orgUnitDN2), + Check: resource.ComposeTestCheckFunc( + testAccCheckDirectoryConfigExists(resourceName, &v3), + resource.TestCheckResourceAttr(resourceName, "directory_name", domain), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.#", "1"), + resource.TestCheckResourceAttr(resourceName, "organizational_unit_distinguished_names.0", orgUnitDN2), + ), + }, + }, + }) +} + func testAccCheckDirectoryConfigExists(resourceName string, appStreamDirectoryConfig *appstream.DirectoryConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -137,18 +192,73 @@ func testAccCheckDirectoryConfigDestroy(s *terraform.State) error { return nil } -func testAccDirectoryConfigConfig(name, userName, password string) string { +func testAccCheckDirectoryConfigNotRecreated(i, j *appstream.DirectoryConfig) resource.TestCheckFunc { + return func(s *terraform.State) error { + if !aws.TimeValue(i.CreatedTime).Equal(aws.TimeValue(j.CreatedTime)) { + return fmt.Errorf("AppStream Directory Config recreated") + } + + return nil + } +} + +func orgUnitFromDomain(orgUnit, domainName string) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("OU=%s", orgUnit)) + for _, dc := range strings.Split(domainName, ".") { + sb.WriteString(fmt.Sprintf(" DC=%s", dc)) + } + return sb.String() +} + +func testAccDirectoryConfigConfig(domain, userName, password, orgUnitDN string) string { + return acctest.ConfigCompose( + acctest.ConfigVpcWithSubnets(2), + fmt.Sprintf(` +resource "aws_appstream_directory_config" "test" { + directory_name = %[1]q + organizational_unit_distinguished_names = [%[4]q] + + service_account_credentials { + account_name = %[2]q + account_password = %[3]q + } + + depends_on = [ + 
aws_directory_service_directory.test + ] +} + +resource "aws_directory_service_directory" "test" { + name = %[1]q + password = %[3]q + edition = "Standard" + type = "MicrosoftAD" + + vpc_settings { + vpc_id = aws_vpc.test.id + subnet_ids = aws_subnet.test[*].id + } +} +`, domain, userName, password, orgUnitDN)) +} + +func testAccDirectoryConfig_OrganizationalUnitDistinguishedNamesConfig(domain, userName, password, orgUnitDN1, orgUnitDN2 string) string { return acctest.ConfigCompose( acctest.ConfigVpcWithSubnets(2), fmt.Sprintf(` resource "aws_appstream_directory_config" "test" { directory_name = %[1]q - organizational_unit_distinguished_names = [aws_directory_service_directory.test.id] + organizational_unit_distinguished_names = [%[4]q, %[5]q] service_account_credentials { account_name = %[2]q account_password = %[3]q } + + depends_on = [ + aws_directory_service_directory.test + ] } resource "aws_directory_service_directory" "test" { @@ -162,5 +272,5 @@ resource "aws_directory_service_directory" "test" { subnet_ids = aws_subnet.test[*].id } } -`, name, userName, password)) +`, domain, userName, password, orgUnitDN1, orgUnitDN2)) } From a0ae390a0b58275a89e76451ebe4264d3e46ded1 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 15 Nov 2021 14:00:01 -0800 Subject: [PATCH 192/304] Tweak to documentation --- website/docs/r/appstream_directory_config.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/appstream_directory_config.html.markdown b/website/docs/r/appstream_directory_config.html.markdown index 1d50e27204a4..da133936691b 100644 --- a/website/docs/r/appstream_directory_config.html.markdown +++ b/website/docs/r/appstream_directory_config.html.markdown @@ -30,7 +30,7 @@ The following arguments are required: * `directory_name` - (Required) Fully qualified name of the directory. * `organizational_unit_distinguished_names` - (Required) Distinguished names of the organizational units for computer accounts. -* `service_account_credentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See below. +* `service_account_credentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See [`service_account_credentials`](#service_account_credentials) below. ### `service_account_credentials` From d26b3591ad948d9f7256c121929056d31a48ee7c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 15 Nov 2021 17:11:44 -0500 Subject: [PATCH 193/304] r/aws_s3control_access_point_policy: New resource. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPointPolicy_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... 
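
The `orgUnitFromDomain` helper added in this patch is what the new assertions compare against: it builds an organizational-unit distinguished name from an OU label plus the domain's dot-separated components. A minimal, self-contained sketch of that behavior (the domain below is a made-up example, not one used by the tests, which generate a random domain at run time):

```go
package main

import (
	"fmt"
	"strings"
)

// orgUnitFromDomain mirrors the test helper above: "OU=<name>" followed by
// one " DC=<component>" per dot-separated part of the domain. Note the
// components are space-joined, matching the helper as written.
func orgUnitFromDomain(orgUnit, domainName string) string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf("OU=%s", orgUnit))
	for _, dc := range strings.Split(domainName, ".") {
		sb.WriteString(fmt.Sprintf(" DC=%s", dc))
	}
	return sb.String()
}

func main() {
	// "corp.example.com" is a hypothetical domain for illustration only.
	fmt.Println(orgUnitFromDomain("One", "corp.example.com"))
	// Output: OU=One DC=corp DC=example DC=com
}
```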
From a0ae390a0b58275a89e76451ebe4264d3e46ded1 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 15 Nov 2021 14:00:01 -0800
Subject: [PATCH 192/304] Tweak to documentation

---
 website/docs/r/appstream_directory_config.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/appstream_directory_config.html.markdown b/website/docs/r/appstream_directory_config.html.markdown
index 1d50e27204a4..da133936691b 100644
--- a/website/docs/r/appstream_directory_config.html.markdown
+++ b/website/docs/r/appstream_directory_config.html.markdown
@@ -30,7 +30,7 @@ The following arguments are required:

 * `directory_name` - (Required) Fully qualified name of the directory.
 * `organizational_unit_distinguished_names` - (Required) Distinguished names of the organizational units for computer accounts.
-* `service_account_credentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See below.
+* `service_account_credentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See [`service_account_credentials`](#service_account_credentials) below.

 ### `service_account_credentials`

From d26b3591ad948d9f7256c121929056d31a48ee7c Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 15 Nov 2021 17:11:44 -0500
Subject: [PATCH 193/304] r/aws_s3control_access_point_policy: New resource.

Acceptance test output:

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPointPolicy_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPointPolicy_ -timeout 180m
=== RUN   TestAccS3ControlAccessPointPolicy_basic
=== PAUSE TestAccS3ControlAccessPointPolicy_basic
=== RUN   TestAccS3ControlAccessPointPolicy_disappears
=== PAUSE TestAccS3ControlAccessPointPolicy_disappears
=== RUN   TestAccS3ControlAccessPointPolicy_disappears_AccessPoint
=== PAUSE TestAccS3ControlAccessPointPolicy_disappears_AccessPoint
=== RUN   TestAccS3ControlAccessPointPolicy_update
=== PAUSE TestAccS3ControlAccessPointPolicy_update
=== CONT  TestAccS3ControlAccessPointPolicy_basic
=== CONT  TestAccS3ControlAccessPointPolicy_update
=== CONT  TestAccS3ControlAccessPointPolicy_disappears_AccessPoint
=== CONT  TestAccS3ControlAccessPointPolicy_disappears
--- PASS: TestAccS3ControlAccessPointPolicy_disappears_AccessPoint (29.92s)
--- PASS: TestAccS3ControlAccessPointPolicy_disappears (30.91s)
--- PASS: TestAccS3ControlAccessPointPolicy_basic (34.21s)
--- PASS: TestAccS3ControlAccessPointPolicy_update (59.52s)
PASS
ok      github.com/hashicorp/terraform-provider-aws/internal/service/s3control 63.190s

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_ -timeout 180m
=== RUN   TestAccS3ControlAccessPoint_basic
=== PAUSE TestAccS3ControlAccessPoint_basic
=== RUN   TestAccS3ControlAccessPoint_disappears
=== PAUSE TestAccS3ControlAccessPoint_disappears
=== RUN   TestAccS3ControlAccessPoint_Bucket_arn
=== PAUSE TestAccS3ControlAccessPoint_Bucket_arn
=== RUN   TestAccS3ControlAccessPoint_policy
=== PAUSE TestAccS3ControlAccessPoint_policy
=== RUN   TestAccS3ControlAccessPoint_publicAccessBlock
=== PAUSE TestAccS3ControlAccessPoint_publicAccessBlock
=== RUN   TestAccS3ControlAccessPoint_vpc
=== PAUSE TestAccS3ControlAccessPoint_vpc
=== CONT  TestAccS3ControlAccessPoint_basic
=== CONT  TestAccS3ControlAccessPoint_publicAccessBlock
=== CONT  TestAccS3ControlAccessPoint_policy
=== CONT  TestAccS3ControlAccessPoint_Bucket_arn
=== CONT  TestAccS3ControlAccessPoint_vpc
=== CONT  TestAccS3ControlAccessPoint_disappears
=== CONT  TestAccS3ControlAccessPoint_Bucket_arn
    acctest.go:1250: skipping since no Outposts found
--- SKIP: TestAccS3ControlAccessPoint_Bucket_arn (1.58s)
--- PASS: TestAccS3ControlAccessPoint_disappears (26.09s)
--- PASS: TestAccS3ControlAccessPoint_vpc (31.94s)
--- PASS: TestAccS3ControlAccessPoint_publicAccessBlock (32.23s)
--- PASS: TestAccS3ControlAccessPoint_basic (32.28s)
--- PASS: TestAccS3ControlAccessPoint_policy (56.70s)
PASS
ok      github.com/hashicorp/terraform-provider-aws/internal/service/s3control 60.246s
---
 .changelog/19294.txt                          |   8 +
 internal/provider/provider.go                 |   1 +
 internal/service/s3control/access_point.go    |  29 +-
 .../service/s3control/access_point_policy.go  | 170 +++++++++++
 .../s3control/access_point_policy_test.go     | 270 ++++++++++++++++++
 .../service/s3control/access_point_test.go    |  77 -----
 internal/service/s3control/find.go            |  54 ++++
 website/docs/r/s3_access_point.html.markdown  |   2 +
 ...3control_access_point_policy.html.markdown |  71 +++++
 9 files changed, 599 insertions(+), 83 deletions(-)
 create mode 100644 internal/service/s3control/access_point_policy.go
 create mode 100644 internal/service/s3control/access_point_policy_test.go
 create mode 100644 website/docs/r/s3control_access_point_policy.html.markdown

diff --git a/.changelog/19294.txt b/.changelog/19294.txt
index bdcc62bea539..f6147e671abe 100644
--- a/.changelog/19294.txt
+++ b/.changelog/19294.txt
@@ -4,4 +4,12 @@ aws_s3control_object_lambda_access_point

 ```release-note:new-resource
 aws_s3control_object_lambda_access_point_policy
+```
+
+```release-note:new-resource
+aws_s3control_access_point_policy
+```
+
+```release-note:enhancement
+resource/aws_s3_access_point: The `policy` argument is now `Computed` so as to support use of the standalone `aws_s3control_access_point_policy` resource
 ```
\ No newline at end of file
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index f6d11c7bee6c..f47f37d082af 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -1479,6 +1479,7 @@ func Provider() *schema.Provider {
 			"aws_s3_object_copy": s3.ResourceObjectCopy(),

 			"aws_s3_access_point":                          s3control.ResourceAccessPoint(),
+			"aws_s3control_access_point_policy":            s3control.ResourceAccessPointPolicy(),
 			"aws_s3_account_public_access_block":           s3control.ResourceAccountPublicAccessBlock(),
 			"aws_s3control_bucket":                         s3control.ResourceBucket(),
 			"aws_s3control_bucket_lifecycle_configuration": s3control.ResourceBucketLifecycleConfiguration(),
diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go
index 0a3a39cf81af..5393f5f01685 100644
--- a/internal/service/s3control/access_point.go
+++ b/internal/service/s3control/access_point.go
@@ -65,6 +65,7 @@ func ResourceAccessPoint() *schema.Resource {
 			"policy": {
 				Type:             schema.TypeString,
 				Optional:         true,
+				Computed:         true,
 				DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs,
 			},
 			"public_access_block_configuration": {
@@ -351,15 +352,31 @@ func resourceAccessPointDelete(d *schema.ResourceData, meta interface{}) error {

 const accessPointResourceIDSeparator = ":"

-func AccessPointCreateResourceID(accessPointARN, accountID, accessPointName string) string {
-	if v, err := arn.Parse(accessPointARN); err != nil && v.Service == "s3-outposts" {
-		return accessPointARN
+func AccessPointCreateResourceID(accessPointARN string) (string, error) {
+	v, err := arn.Parse(accessPointARN)
+
+	if err != nil {
+		return "", err
 	}

-	parts := []string{accountID, accessPointName}
-	id := strings.Join(parts, accessPointResourceIDSeparator)
+	switch service := v.Service; service {
+	case "s3":
+		resource := v.Resource
+		if !strings.HasPrefix(resource, "accesspoint/") {
+			return "", fmt.Errorf("unexpected resource: %s", resource)
+		}
+
+		parts := []string{v.AccountID, strings.TrimPrefix(resource, "accesspoint/")}
+		id := strings.Join(parts, accessPointResourceIDSeparator)

-	return id
+		return id, nil
+
+	case "s3-outposts":
+		return accessPointARN, nil
+
+	default:
+		return "", fmt.Errorf("unexpected service: %s", service)
+	}
 }

 func AccessPointParseResourceID(id string) (string, string, error) {
diff --git a/internal/service/s3control/access_point_policy.go b/internal/service/s3control/access_point_policy.go
new file mode 100644
index 000000000000..fd5c908c6b28
--- /dev/null
+++ b/internal/service/s3control/access_point_policy.go
@@ -0,0 +1,170 @@
+package s3control
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3control"
+	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+	"github.com/hashicorp/terraform-provider-aws/internal/verify"
+)
+
+func ResourceAccessPointPolicy() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAccessPointPolicyCreate,
+		Read:   resourceAccessPointPolicyRead,
+		Update: resourceAccessPointPolicyUpdate,
+		Delete: resourceAccessPointPolicyDelete,
+
+		Importer: &schema.ResourceImporter{
+			StateContext: resourceAccessPointPolicyImport,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"access_point_arn": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: verify.ValidARN,
+			},
+			"has_public_access_policy": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"policy": {
+				Type:             schema.TypeString,
+				Required:         true,
+				DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs,
+			},
+		},
+	}
+}
+
+func resourceAccessPointPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).S3ControlConn
+
+	resourceID, err := AccessPointCreateResourceID(d.Get("access_point_arn").(string))
+
+	if err != nil {
+		return err
+	}
+
+	accountID, name, err := AccessPointParseResourceID(resourceID)
+
+	if err != nil {
+		return err
+	}
+
+	input := &s3control.PutAccessPointPolicyInput{
+		AccountId: aws.String(accountID),
+		Name:      aws.String(name),
+		Policy:    aws.String(d.Get("policy").(string)),
+	}
+
+	log.Printf("[DEBUG] Creating S3 Access Point Policy: %s", input)
+	_, err = conn.PutAccessPointPolicy(input)
+
+	if err != nil {
+		return fmt.Errorf("error creating S3 Access Point (%s) Policy: %w", resourceID, err)
+	}
+
+	d.SetId(resourceID)
+
+	return resourceAccessPointPolicyRead(d, meta)
+}
+
+func resourceAccessPointPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).S3ControlConn
+
+	accountID, name, err := AccessPointParseResourceID(d.Id())
+
+	if err != nil {
+		return err
+	}
+
+	policy, status, err := FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name)
+
+	if !d.IsNewResource() && tfresource.NotFound(err) {
+		log.Printf("[WARN] S3 Access Point Policy (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error reading S3 Access Point Policy (%s): %w", d.Id(), err)
+	}
+
+	d.Set("has_public_access_policy", status.IsPublic)
+	d.Set("policy", policy)
+
+	return nil
+}
+
+func resourceAccessPointPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).S3ControlConn
+
+	accountID, name, err := AccessPointParseResourceID(d.Id())
+
+	if err != nil {
+		return err
+	}
+
+	input := &s3control.PutAccessPointPolicyInput{
+		AccountId: aws.String(accountID),
+		Name:      aws.String(name),
+		Policy:    aws.String(d.Get("policy").(string)),
+	}
+
+	log.Printf("[DEBUG] Updating S3 Access Point Policy: %s", input)
+	_, err = conn.PutAccessPointPolicy(input)
+
+	if err != nil {
+		return fmt.Errorf("error updating S3 Access Point Policy (%s): %w", d.Id(), err)
+	}
+
+	return resourceAccessPointPolicyRead(d, meta)
+}
+
+func resourceAccessPointPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).S3ControlConn
+
+	accountID, name, err := AccessPointParseResourceID(d.Id())
+
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Deleting S3 Access Point Policy: %s", d.Id())
+	_, err = conn.DeleteAccessPointPolicy(&s3control.DeleteAccessPointPolicyInput{
+		AccountId: aws.String(accountID),
+		Name:      aws.String(name),
+	})
+
+	if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) {
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error deleting S3 Access Point Policy (%s): %w", d.Id(), err)
+	}
+
+	return nil
+}
+
+func resourceAccessPointPolicyImport(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	resourceID, err := AccessPointCreateResourceID(d.Id())
+
+	if err != nil {
+		return nil, err
+	}
+
+	d.Set("access_point_arn", d.Id())
+	d.SetId(resourceID)
+
+	return []*schema.ResourceData{d}, nil
+}
diff --git a/internal/service/s3control/access_point_policy_test.go b/internal/service/s3control/access_point_policy_test.go
new file mode 100644
index 000000000000..ee940799f3f3
--- /dev/null
+++ b/internal/service/s3control/access_point_policy_test.go
@@ -0,0 +1,270 @@
+package s3control_test
+
+import (
+	"fmt"
+	"regexp"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/s3control"
+	sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
+	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+)
+
+func TestAccS3ControlAccessPointPolicy_basic(t *testing.T) {
+	resourceName := "aws_s3control_access_point_policy.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t) },
+		ErrorCheck:   acctest.ErrorCheck(t, s3control.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckAccessPointPolicyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAccessPointPolicyConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAccessPointPolicyExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "true"),
+					resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`s3:GetObjectTagging`)),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateIdFunc: testAccAccessPointPolicyImportStateIdFunc(resourceName),
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccS3ControlAccessPointPolicy_disappears(t *testing.T) {
+	resourceName := "aws_s3control_access_point_policy.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t) },
+		ErrorCheck:   acctest.ErrorCheck(t, s3control.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckAccessPointPolicyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAccessPointPolicyConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAccessPointPolicyExists(resourceName),
+					acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceAccessPointPolicy(), resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func TestAccS3ControlAccessPointPolicy_disappears_AccessPoint(t *testing.T) {
+	resourceName := "aws_s3control_access_point_policy.test"
+	accessPointResourceName := "aws_s3_access_point.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t) },
+		ErrorCheck:   acctest.ErrorCheck(t, s3control.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckAccessPointPolicyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAccessPointPolicyConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAccessPointPolicyExists(resourceName),
+					acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceAccessPoint(), accessPointResourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func TestAccS3ControlAccessPointPolicy_update(t *testing.T) {
+	resourceName := "aws_s3control_access_point_policy.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t) },
+		ErrorCheck:   acctest.ErrorCheck(t, s3control.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckAccessPointPolicyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAccessPointPolicyConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAccessPointPolicyExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "true"),
+					resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`s3:GetObjectTagging`)),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateIdFunc: testAccAccessPointPolicyImportStateIdFunc(resourceName),
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccAccessPointPolicyUpdatedConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAccessPointPolicyExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "true"),
+					resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`s3:GetObjectLegalHold`)),
+				),
+			},
+		},
+	})
+}
+
+func testAccAccessPointPolicyImportStateIdFunc(n string) resource.ImportStateIdFunc {
+	return func(s *terraform.State) (string, error) {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return "", fmt.Errorf("Not found: %s", n)
+		}
+
+		return rs.Primary.Attributes["access_point_arn"], nil
+	}
+}
+
+func testAccCheckAccessPointPolicyDestroy(s *terraform.State) error {
+	conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_s3control_access_point_policy" {
+			continue
+		}
+
+		accountID, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+
+		_, _, err = tfs3control.FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name)
+
+		if tfresource.NotFound(err) {
+			continue
+		}
+
+		if err != nil {
+			return err
+		}
+
+		return fmt.Errorf("S3 Access Point Policy %s still exists", rs.Primary.ID)
+	}
+
+	return nil
+}
+
+func testAccCheckAccessPointPolicyExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No S3 Access Point Policy ID is set")
+		}
+
+		accountID, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+
+		conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn
+
+		_, _, err = tfs3control.FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name)
+
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+}
+
+func testAccAccessPointPolicyConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_s3_bucket" "test" {
+  bucket = %[1]q
+}
+
+resource "aws_s3_access_point" "test" {
+  bucket = aws_s3_bucket.test.id
+  name   = %[1]q
+
+  public_access_block_configuration {
+    block_public_acls       = true
+    block_public_policy     = false
+    ignore_public_acls      = true
+    restrict_public_buckets = false
+  }
+}
+
+resource "aws_s3control_access_point_policy" "test" {
+  access_point_arn = aws_s3_access_point.test.arn
+
+  policy = jsonencode({
+    Version = "2008-10-17"
+    Statement = [{
+      Effect = "Allow"
+      Action = "s3:GetObjectTagging"
+      Principal = {
+        AWS = "*"
+      }
+      Resource = "${aws_s3_access_point.test.arn}/object/*"
+    }]
+  })
+}
+`, rName)
+}
+
+func testAccAccessPointPolicyUpdatedConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_s3_bucket" "test" {
+  bucket = %[1]q
+}
+
+resource "aws_s3_access_point" "test" {
+  bucket = aws_s3_bucket.test.id
+  name   = %[1]q
+
+  public_access_block_configuration {
+    block_public_acls       = true
+    block_public_policy     = false
+    ignore_public_acls      = true
+    restrict_public_buckets = false
+  }
+}
+
+resource "aws_s3control_access_point_policy" "test" {
+  access_point_arn = aws_s3_access_point.test.arn

+  policy = jsonencode({
+    Version = "2008-10-17"
+    Statement = [{
+      Effect = "Allow"
+      Action = [
+        "s3:GetObjectLegalHold",
+        "s3:GetObjectRetention",
+      ]
+      Principal = {
+        AWS = "*"
+      }
+      Resource = "${aws_s3_access_point.test.arn}/object/prefix/*"
+    }]
+  })
+}
+`, rName)
+}
diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go
index e2ad0348654e..afd9a20cb661 100644
--- a/internal/service/s3control/access_point_test.go
+++ b/internal/service/s3control/access_point_test.go
@@ -6,7 +6,6 @@ import (
 	"testing"

 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3control"
 	sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -82,31 +81,6 @@ func TestAccS3ControlAccessPoint_disappears(t *testing.T) {
 	})
 }

-func TestAccS3ControlAccessPoint_Disappears_bucket(t *testing.T) {
-	var v s3control.GetAccessPointOutput
-	bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
-	accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
-	resourceName := "aws_s3_access_point.test"
-	bucketResourceName := "aws_s3_bucket.test"
-
-	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:     func() { acctest.PreCheck(t) },
-		ErrorCheck:   acctest.ErrorCheck(t, s3control.EndpointsID),
-		Providers:    acctest.Providers,
-		CheckDestroy: testAccCheckAccessPointDestroy,
-		Steps: []resource.TestStep{
-			{
-				Config: testAccAccessPointConfig_basic(bucketName, accessPointName),
-				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAccessPointExists(resourceName, &v),
-					testAccCheckDestroyBucket(bucketResourceName),
-				),
-				ExpectNonEmptyPlan: true,
-			},
-		},
-	})
-}
-
 func TestAccS3ControlAccessPoint_Bucket_arn(t *testing.T) {
 	var v s3control.GetAccessPointOutput
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
@@ -230,14 +204,6 @@ func TestAccS3ControlAccessPoint_policy(t *testing.T) {
 					testAccCheckAccessPointHasPolicy(resourceName, expectedPolicyText2),
 				),
 			},
-			{
-				Config: testAccAccessPointConfig_noPolicy(rName),
-				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAccessPointExists(resourceName, &v),
-					resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"),
-					resource.TestCheckResourceAttr(resourceName, "policy", ""),
-				),
-			},
 		},
 	})
 }
@@ -584,26 +550,6 @@ data "aws_iam_policy_document" "test" {
 `, rName)
 }

-func testAccAccessPointConfig_noPolicy(rName string) string {
-	return fmt.Sprintf(`
-resource "aws_s3_bucket" "test" {
-  bucket = %[1]q
-}
-
-resource "aws_s3_access_point" "test" {
-  bucket = aws_s3_bucket.test.bucket
-  name   = %[1]q
-
-  public_access_block_configuration {
-    block_public_acls       = true
-    block_public_policy     = false
-    ignore_public_acls      = true
-    restrict_public_buckets = false
-  }
-}
-`, rName)
-}
-
 func testAccAccessPointConfig_publicAccessBlock(rName string) string {
 	return fmt.Sprintf(`
 resource "aws_s3_bucket" "test" {
@@ -648,26 +594,3 @@ resource "aws_s3_access_point" "test" {
 }
 `, rName)
 }
-
-func testAccCheckDestroyBucket(n string) resource.TestCheckFunc {
-	return func(s *terraform.State) error {
-		rs, ok := s.RootModule().Resources[n]
-		if !ok {
-			return fmt.Errorf("Not found: %s", n)
-		}
-
-		if rs.Primary.ID == "" {
-			return fmt.Errorf("No S3 Bucket ID is set")
-		}
-
-		conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn
-		_, err := conn.DeleteBucket(&s3.DeleteBucketInput{
-			Bucket: aws.String(rs.Primary.ID),
-		})
-
-		if err != nil {
-			return fmt.Errorf("Error destroying Bucket (%s) in testAccCheckDestroyBucket: %s", rs.Primary.ID, err)
-		}
-		return nil
-	}
-}
diff --git a/internal/service/s3control/find.go b/internal/service/s3control/find.go
index 00cb3e66f36a..ec6926c969ad 100644
--- a/internal/service/s3control/find.go
+++ b/internal/service/s3control/find.go
@@ -26,6 +26,60 @@ func findPublicAccessBlockConfiguration(conn *s3control.S3Control, accountID str
 	return output.PublicAccessBlockConfiguration, nil
 }

+func FindAccessPointPolicyAndStatusByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (string, *s3control.PolicyStatus, error) {
+	input1 := &s3control.GetAccessPointPolicyInput{
+		AccountId: aws.String(accountID),
+		Name:      aws.String(name),
+	}
+
+	output1, err := conn.GetAccessPointPolicy(input1)
+
+	if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) {
+		return "", nil, &resource.NotFoundError{
+			LastError:   err,
+			LastRequest: input1,
+		}
+	}
+
+	if err != nil {
+		return "", nil, err
+	}
+
+	if output1 == nil {
+		return "", nil, tfresource.NewEmptyResultError(input1)
+	}
+
+	policy := aws.StringValue(output1.Policy)
+
+	if policy == "" {
+		return "", nil, tfresource.NewEmptyResultError(input1)
+	}
+
+	input2 := &s3control.GetAccessPointPolicyStatusInput{
+		AccountId: aws.String(accountID),
+		Name:      aws.String(name),
+	}
+
+	output2, err := conn.GetAccessPointPolicyStatus(input2)
+
+	if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) {
+		return "", nil, &resource.NotFoundError{
+			LastError:   err,
+			LastRequest: input2,
+		}
+	}
+
+	if err != nil {
+		return "", nil, err
+	}
+
+	if output2 == nil || output2.PolicyStatus == nil {
+		return "", nil, tfresource.NewEmptyResultError(input2)
+	}
+
+	return policy, output2.PolicyStatus, nil
+}
+
 func FindMultiRegionAccessPointByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (*s3control.MultiRegionAccessPointReport, error) {
 	input := &s3control.GetMultiRegionAccessPointInput{
 		AccountId: aws.String(accountID),
diff --git a/website/docs/r/s3_access_point.html.markdown b/website/docs/r/s3_access_point.html.markdown
index 81b4675dd273..18207e3d0ca3 100644
--- a/website/docs/r/s3_access_point.html.markdown
+++ b/website/docs/r/s3_access_point.html.markdown
@@ -10,6 +10,8 @@ description: |-

 Provides a resource to manage an S3 Access Point.

+~> **NOTE on Access Points and Access Point Policies:** Terraform provides both a standalone [Access Point Policy](s3control_access_point_policy.html) resource and an Access Point resource with a resource policy defined in-line. You cannot use an Access Point with an in-line resource policy in conjunction with an Access Point Policy resource. Doing so will cause a conflict of policies and will overwrite the access point's resource policy.
+
 -> Advanced usage: To use a custom API endpoint for this Terraform resource, use the [`s3control` endpoint provider configuration](/docs/providers/aws/index.html#s3control), not the `s3` endpoint provider configuration.

 ## Example Usage
diff --git a/website/docs/r/s3control_access_point_policy.html.markdown b/website/docs/r/s3control_access_point_policy.html.markdown
new file mode 100644
index 000000000000..a1d3cb77e241
--- /dev/null
+++ b/website/docs/r/s3control_access_point_policy.html.markdown
@@ -0,0 +1,71 @@
+---
+subcategory: "S3 Control"
+layout: "aws"
+page_title: "AWS: aws_s3control_access_point_policy"
+description: |-
+  Provides a resource to manage an S3 Access Point resource policy.
+---
+
+# Resource: aws_s3control_access_point_policy
+
+Provides a resource to manage an S3 Access Point resource policy.
+
+~> **NOTE on Access Points and Access Point Policies:** Terraform provides both a standalone Access Point Policy resource and an [Access Point](s3_access_point.html) resource with a resource policy defined in-line. You cannot use an Access Point with an in-line resource policy in conjunction with an Access Point Policy resource. Doing so will cause a conflict of policies and will overwrite the access point's resource policy.
+
+## Example Usage
+
+```terraform
+resource "aws_s3_bucket" "example" {
+  bucket = "example"
+}
+
+resource "aws_s3_access_point" "example" {
+  bucket = aws_s3_bucket.example.id
+  name   = "example"
+
+  public_access_block_configuration {
+    block_public_acls       = true
+    block_public_policy     = false
+    ignore_public_acls      = true
+    restrict_public_buckets = false
+  }
+}
+
+resource "aws_s3control_access_point_policy" "example" {
+  access_point_arn = aws_s3_access_point.example.arn
+
+  policy = jsonencode({
+    Version = "2008-10-17"
+    Statement = [{
+      Effect = "Allow"
+      Action = "s3:GetObjectTagging"
+      Principal = {
+        AWS = "*"
+      }
+      Resource = "${aws_s3_access_point.example.arn}/object/*"
+    }]
+  })
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `access_point_arn` - (Required) The ARN of the access point that you want to associate with the specified policy.
+* `policy` - (Required) The policy that you want to apply to the specified access point.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `has_public_access_policy` - Indicates whether this access point currently has a policy that allows public access.
+* `id` - The AWS account ID and access point name separated by a colon (`:`).
+
+## Import
+
+Access Point policies can be imported using the `access_point_arn`, e.g.
+
+```
+$ terraform import aws_s3control_access_point_policy.example arn:aws:s3:us-west-2:123456789012:accesspoint/example
+```
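
The resource ID scheme introduced in this patch derives `account-id:name` from a standard S3 access point ARN and passes S3 on Outposts ARNs through unchanged. A runnable sketch of that mapping, using the AWS SDK's `arn` package as the patch does (the sample ARN is the fictitious one from the documentation above; the function here is a standalone copy for illustration, not the provider's exported helper):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/arn"
)

// accessPointCreateResourceID mirrors the patched AccessPointCreateResourceID:
// plain "s3" access point ARNs collapse to "account-id:name", while
// "s3-outposts" ARNs are kept whole because the ARN itself is the identifier.
func accessPointCreateResourceID(accessPointARN string) (string, error) {
	v, err := arn.Parse(accessPointARN)
	if err != nil {
		return "", err
	}

	switch v.Service {
	case "s3":
		if !strings.HasPrefix(v.Resource, "accesspoint/") {
			return "", fmt.Errorf("unexpected resource: %s", v.Resource)
		}
		return v.AccountID + ":" + strings.TrimPrefix(v.Resource, "accesspoint/"), nil
	case "s3-outposts":
		return accessPointARN, nil
	default:
		return "", fmt.Errorf("unexpected service: %s", v.Service)
	}
}

func main() {
	id, err := accessPointCreateResourceID("arn:aws:s3:us-west-2:123456789012:accesspoint/example")
	fmt.Println(id, err) // 123456789012:example <nil>
}
```

This is also why import accepts the ARN rather than the colon-separated ID: the importer converts the ARN to the internal ID and stores the ARN in `access_point_arn`.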
From 3a129d3bcad002298e3f6fff745866ce482f496c Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 15 Nov 2021 16:03:18 -0800
Subject: [PATCH 194/304] Fixes formatting

---
 internal/service/appstream/directory_config_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/internal/service/appstream/directory_config_test.go b/internal/service/appstream/directory_config_test.go
index 39c3f2870795..919d8d8e2c2c 100644
--- a/internal/service/appstream/directory_config_test.go
+++ b/internal/service/appstream/directory_config_test.go
@@ -225,7 +225,7 @@ resource "aws_appstream_directory_config" "test" {
   }

   depends_on = [
-	aws_directory_service_directory.test
+    aws_directory_service_directory.test
   ]
 }

@@ -257,7 +257,7 @@ resource "aws_appstream_directory_config" "test" {
   }

   depends_on = [
-	aws_directory_service_directory.test
+    aws_directory_service_directory.test
   ]
 }

From 699458be6da3348114c30c4d2db8e32a73122dbb Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 15 Nov 2021 16:25:14 -0800
Subject: [PATCH 195/304] Adds CHANGELOG

---
 .changelog/21777.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/21777.txt

diff --git a/.changelog/21777.txt b/.changelog/21777.txt
new file mode 100644
index 000000000000..ca2a4f9b16d0
--- /dev/null
+++ b/.changelog/21777.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_autoscaling_group: Fix pending state in instance refresh
+```

From a3d55275583c29983ff247d810e1ad8c64c2373f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 16 Nov 2021 06:09:37 +0000
Subject: [PATCH 196/304] build(deps): bump github.com/aws/aws-sdk-go from
 1.42.3 to 1.42.5

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.3 to 1.42.5.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.3...v1.42.5)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 074a5569d67c..a24244a2a6f7 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.16

 require (
 	github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
-	github.com/aws/aws-sdk-go v1.42.3
+	github.com/aws/aws-sdk-go v1.42.5
 	github.com/beevik/etree v1.1.0
 	github.com/evanphx/json-patch v0.5.2 // indirect
 	github.com/fatih/color v1.9.0 // indirect
diff --git a/go.sum b/go.sum
index 1e2f189ec055..ccd7b7cfd867 100644
--- a/go.sum
+++ b/go.sum
@@ -66,8 +66,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
 github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
 github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.42.3 h1:lBKr3tQ06m1uykiychMNKLK1bRfOzaIEQpsI/S3QiNc=
-github.com/aws/aws-sdk-go v1.42.3/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.42.5 h1:0xNoQrGh9InmUsT+9qzZ8QLfBEUsnev5BMeED6t6cKI=
+github.com/aws/aws-sdk-go v1.42.5/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
 github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
 github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=

From 736a2ddda05411a025d9c639d004672e9b47c751 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 08:24:46 -0500
Subject: [PATCH 197/304] r/aws_cloudfront_cache_policy: Fix support for 0
 values of 'default_ttl', 'max_ttl' and 'min_ttl'.

---
 internal/service/cloudfront/cache_policy.go   | 28 +++----
 .../service/cloudfront/cache_policy_test.go   | 69 +++++++++++++++++
 2 files changed, 77 insertions(+), 20 deletions(-)

diff --git a/internal/service/cloudfront/cache_policy.go b/internal/service/cloudfront/cache_policy.go
index df8960740ff3..7a92ca028b6c 100644
--- a/internal/service/cloudfront/cache_policy.go
+++ b/internal/service/cloudfront/cache_policy.go
@@ -161,22 +161,16 @@ func resourceCachePolicyCreate(d *schema.ResourceData, meta interface{}) error {

 	name := d.Get("name").(string)
 	apiObject := &cloudfront.CachePolicyConfig{
-		MinTTL: aws.Int64(int64(d.Get("min_ttl").(int))),
-		Name:   aws.String(name),
+		DefaultTTL: aws.Int64(int64(d.Get("default_ttl").(int))),
+		MaxTTL:     aws.Int64(int64(d.Get("max_ttl").(int))),
+		MinTTL:     aws.Int64(int64(d.Get("min_ttl").(int))),
+		Name:       aws.String(name),
 	}

 	if v, ok := d.GetOk("comment"); ok {
 		apiObject.Comment = aws.String(v.(string))
 	}

-	if v, ok := d.GetOk("default_ttl"); ok {
-		apiObject.DefaultTTL = aws.Int64(int64(v.(int)))
-	}
-
-	if v, ok := d.GetOk("max_ttl"); ok {
-		apiObject.MaxTTL = aws.Int64(int64(v.(int)))
-	}
-
 	if v, ok := d.GetOk("parameters_in_cache_key_and_forwarded_to_origin"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
 		apiObject.ParametersInCacheKeyAndForwardedToOrigin = expandParametersInCacheKeyAndForwardedToOrigin(v.([]interface{})[0].(map[string]interface{}))
 	}
@@ -238,22 +232,16 @@ func resourceCachePolicyUpdate(d *schema.ResourceData, meta interface{}) error {
 	// "When you update a cache policy configuration, all the fields are updated with the values provided in the request. You cannot update some fields independent of others."
 	//
 	apiObject := &cloudfront.CachePolicyConfig{
-		MinTTL: aws.Int64(int64(d.Get("min_ttl").(int))),
-		Name:   aws.String(d.Get("name").(string)),
+		DefaultTTL: aws.Int64(int64(d.Get("default_ttl").(int))),
+		MaxTTL:     aws.Int64(int64(d.Get("max_ttl").(int))),
+		MinTTL:     aws.Int64(int64(d.Get("min_ttl").(int))),
+		Name:       aws.String(d.Get("name").(string)),
 	}

 	if v, ok := d.GetOk("comment"); ok {
 		apiObject.Comment = aws.String(v.(string))
 	}

-	if v, ok := d.GetOk("default_ttl"); ok {
-		apiObject.DefaultTTL = aws.Int64(int64(v.(int)))
-	}
-
-	if v, ok := d.GetOk("max_ttl"); ok {
-		apiObject.MaxTTL = aws.Int64(int64(v.(int)))
-	}
-
 	if v, ok := d.GetOk("parameters_in_cache_key_and_forwarded_to_origin"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
 		apiObject.ParametersInCacheKeyAndForwardedToOrigin = expandParametersInCacheKeyAndForwardedToOrigin(v.([]interface{})[0].(map[string]interface{}))
 	}
diff --git a/internal/service/cloudfront/cache_policy_test.go b/internal/service/cloudfront/cache_policy_test.go
index 0d3eb38d37e5..9a9fe41936cd 100644
--- a/internal/service/cloudfront/cache_policy_test.go
+++ b/internal/service/cloudfront/cache_policy_test.go
@@ -160,6 +160,49 @@ func TestAccCloudFrontCachePolicy_Items(t *testing.T) {
 	})
 }

+func TestAccCloudFrontCachePolicy_ZeroTTLs(t *testing.T) {
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_cloudfront_cache_policy.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(cloudfront.EndpointsID, t) },
+		ErrorCheck:   acctest.ErrorCheck(t, cloudfront.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckCloudFrontCachePolicyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCachePolicyZeroTTLsConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckCloudFrontCachePolicyExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "comment", ""),
+					resource.TestCheckResourceAttr(resourceName, "default_ttl", "0"),
+					resource.TestCheckResourceAttrSet(resourceName, "etag"),
+					resource.TestCheckResourceAttr(resourceName, "min_ttl", "0"),
+					resource.TestCheckResourceAttr(resourceName, "max_ttl", "0"),
+					resource.TestCheckResourceAttr(resourceName, "name", rName),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "none"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.enable_accept_encoding_brotli", "false"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.enable_accept_encoding_gzip", "false"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "none"),
+					resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.#", "0"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func testAccCheckCloudFrontCachePolicyDestroy(s *terraform.State) error {
 	conn := acctest.Provider.Meta().(*conns.AWSClient).CloudFrontConn

@@ -306,3 +349,29 @@ resource "aws_cloudfront_cache_policy" "test" {
 }
 `, rName)
 }
+
+func testAccCachePolicyZeroTTLsConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_cloudfront_cache_policy" "test" {
+  name = %[1]q
+
+  default_ttl = 0
+  max_ttl     = 0
+  min_ttl     = 0
+
+  parameters_in_cache_key_and_forwarded_to_origin {
+    cookies_config {
+      cookie_behavior = "none"
+    }
+
+    headers_config {
+      header_behavior = "none"
+    }
+
+    query_strings_config {
+      query_string_behavior = "none"
+    }
+  }
+}
+`, rName)
+}
From 1122a79945996fa52a64e0756a72413fb8e4af08 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 08:27:33 -0500
Subject: [PATCH 198/304] Add CHANGELOG entry.

---
 .changelog/21793.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/21793.txt

diff --git a/.changelog/21793.txt b/.changelog/21793.txt
new file mode 100644
index 000000000000..1519ccd7c167
--- /dev/null
+++ b/.changelog/21793.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_cloudfront_cache_policy: Fix 0 values for `default_ttl`, `max_ttl` and `min_ttl` arguments
+```
\ No newline at end of file

From 3b1a32337758a32cb546ebc36a34ff529b1884bb Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 08:42:42 -0500
Subject: [PATCH 199/304] r/aws_internet_gateway: Allow 'available' as a
 pending state during gateway detach.

---
 internal/service/ec2/wait.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go
index db277157fb6f..87e537b36fe3 100644
--- a/internal/service/ec2/wait.go
+++ b/internal/service/ec2/wait.go
@@ -714,7 +714,7 @@ func WaitInternetGatewayAttached(conn *ec2.EC2, internetGatewayID, vpcID string,

 func WaitInternetGatewayDetached(conn *ec2.EC2, internetGatewayID, vpcID string, timeout time.Duration) (*ec2.InternetGatewayAttachment, error) {
 	stateConf := &resource.StateChangeConf{
-		Pending: []string{ec2.AttachmentStatusDetaching},
+		Pending: []string{InternetGatewayAttachmentStateAvailable, ec2.AttachmentStatusDetaching},
 		Target:  []string{},
 		Timeout: timeout,
 		Refresh: StatusInternetGatewayAttachmentState(conn, internetGatewayID, vpcID),

From 7aac7f5cba8150961ed46918a648989283529a05 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 08:46:47 -0500
Subject: [PATCH 200/304] Add CHANGELOG entry.

---
 .changelog/21794.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/21794.txt

diff --git a/.changelog/21794.txt b/.changelog/21794.txt
new file mode 100644
index 000000000000..d49c163d1145
--- /dev/null
+++ b/.changelog/21794.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_internet_gateway: Allow `available` as a *pending* state during gateway detach
+```
\ No newline at end of file
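
For context on the one-line waiter change in PATCH 199: `resource.StateChangeConf` fails fast with an "unexpected state" error whenever a refresh returns a state that is in neither `Pending` nor `Target`, so a detach that briefly reports `available` before `detaching` would abort the wait. A minimal sketch of that behavior with a simulated refresh function (the state sequence below is invented for illustration; the real refresh queries the EC2 API):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func main() {
	// Simulated attachment states: "available" once, then "detaching",
	// then gone (a nil result satisfies an empty Target list).
	states := []string{"available", "detaching"}
	i := 0

	conf := &resource.StateChangeConf{
		// Without "available" in Pending, the very first refresh would
		// return an "unexpected state" error -- the bug the patch fixes.
		Pending: []string{"available", "detaching"},
		Target:  []string{}, // empty Target: wait for the attachment to disappear
		Timeout: 10 * time.Second,
		Refresh: func() (interface{}, string, error) {
			if i >= len(states) {
				return nil, "", nil // not found: reaches the empty Target
			}
			s := states[i]
			i++
			return struct{}{}, s, nil
		},
	}

	_, err := conf.WaitForState()
	fmt.Println("wait finished, err =", err)
}
```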
From 63e88c7cfa7375a064493a520a75858d0a8b39e5 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 09:07:08 -0500
Subject: [PATCH 201/304] r/aws_s3_access_point: 'policy' cannot (yet) be
 Computed as that's a breaking change (no ability to delete a policy).

Acceptance test output:

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_ -timeout 180m
=== RUN   TestAccS3ControlAccessPoint_basic
=== PAUSE TestAccS3ControlAccessPoint_basic
=== RUN   TestAccS3ControlAccessPoint_disappears
=== PAUSE TestAccS3ControlAccessPoint_disappears
=== RUN   TestAccS3ControlAccessPoint_Bucket_arn
=== PAUSE TestAccS3ControlAccessPoint_Bucket_arn
=== RUN   TestAccS3ControlAccessPoint_policy
=== PAUSE TestAccS3ControlAccessPoint_policy
=== RUN   TestAccS3ControlAccessPoint_publicAccessBlock
=== PAUSE TestAccS3ControlAccessPoint_publicAccessBlock
=== RUN   TestAccS3ControlAccessPoint_vpc
=== PAUSE TestAccS3ControlAccessPoint_vpc
=== CONT  TestAccS3ControlAccessPoint_basic
=== CONT  TestAccS3ControlAccessPoint_publicAccessBlock
=== CONT  TestAccS3ControlAccessPoint_policy
=== CONT  TestAccS3ControlAccessPoint_vpc
=== CONT  TestAccS3ControlAccessPoint_Bucket_arn
=== CONT  TestAccS3ControlAccessPoint_disappears
=== CONT  TestAccS3ControlAccessPoint_Bucket_arn
    acctest.go:1250: skipping since no Outposts found
--- SKIP: TestAccS3ControlAccessPoint_Bucket_arn (1.47s)
--- PASS: TestAccS3ControlAccessPoint_disappears (28.54s)
--- PASS: TestAccS3ControlAccessPoint_publicAccessBlock (32.46s)
--- PASS: TestAccS3ControlAccessPoint_basic (32.61s)
--- PASS: TestAccS3ControlAccessPoint_vpc (33.23s)
--- PASS: TestAccS3ControlAccessPoint_policy (79.79s)
PASS
ok      github.com/hashicorp/terraform-provider-aws/internal/service/s3control 83.207s

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPointPolicy_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPointPolicy_ -timeout 180m
=== RUN   TestAccS3ControlAccessPointPolicy_basic
=== PAUSE TestAccS3ControlAccessPointPolicy_basic
=== RUN   TestAccS3ControlAccessPointPolicy_disappears
=== PAUSE TestAccS3ControlAccessPointPolicy_disappears
=== RUN   TestAccS3ControlAccessPointPolicy_disappears_AccessPoint
=== PAUSE TestAccS3ControlAccessPointPolicy_disappears_AccessPoint
=== RUN   TestAccS3ControlAccessPointPolicy_update
=== PAUSE TestAccS3ControlAccessPointPolicy_update
=== CONT  TestAccS3ControlAccessPointPolicy_basic
=== CONT  TestAccS3ControlAccessPointPolicy_update
=== CONT  TestAccS3ControlAccessPointPolicy_disappears_AccessPoint
=== CONT  TestAccS3ControlAccessPointPolicy_disappears
--- PASS: TestAccS3ControlAccessPointPolicy_disappears_AccessPoint (30.56s)
--- PASS: TestAccS3ControlAccessPointPolicy_disappears (31.74s)
--- PASS: TestAccS3ControlAccessPointPolicy_basic (36.51s)
--- PASS: TestAccS3ControlAccessPointPolicy_update (60.98s)
PASS
ok      github.com/hashicorp/terraform-provider-aws/internal/service/s3control 64.567s
---
 .changelog/19294.txt                          |  4 ---
 internal/service/s3control/access_point.go    |  1 -
 .../s3control/access_point_policy_test.go     |  8 ++++++
 .../service/s3control/access_point_test.go    | 28 +++++++++++++++++++
 ...3control_access_point_policy.html.markdown |  4 +++
 5 files changed, 40 insertions(+), 5 deletions(-)

diff --git a/.changelog/19294.txt b/.changelog/19294.txt
index f6147e671abe..13a8132449ff 100644
--- a/.changelog/19294.txt
+++ b/.changelog/19294.txt
@@ -9,7 +9,3 @@ aws_s3control_object_lambda_access_point_policy
 ```release-note:new-resource
 aws_s3control_access_point_policy
 ```
-
-```release-note:enhancement
-resource/aws_s3_access_point: The `policy` argument is now `Computed` so as to support use of the standalone `aws_s3control_access_point_policy` resource
-```
\ No newline at end of file
diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go
index 5393f5f01685..ed130b19b8e8 100644
--- a/internal/service/s3control/access_point.go
+++ b/internal/service/s3control/access_point.go
@@ -65,7 +65,6 @@ func ResourceAccessPoint() *schema.Resource {
 			"policy": {
 				Type:             schema.TypeString,
 				Optional:         true,
-				Computed:         true,
 				DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs,
 			},
 			"public_access_block_configuration": {
diff --git a/internal/service/s3control/access_point_policy_test.go b/internal/service/s3control/access_point_policy_test.go
index ee940799f3f3..10be7ad42e83 100644
--- a/internal/service/s3control/access_point_policy_test.go
+++ b/internal/service/s3control/access_point_policy_test.go
@@ -210,6 +210,10 @@ resource "aws_s3_access_point" "test" {
     ignore_public_acls      = true
     restrict_public_buckets = false
   }
+
+  lifecycle {
+    ignore_changes = [policy]
+  }
 }

 resource "aws_s3control_access_point_policy" "test" {
@@ -246,6 +250,10 @@ resource "aws_s3_access_point" "test" {
     ignore_public_acls      = true
     restrict_public_buckets = false
   }
+
+  lifecycle {
+    ignore_changes = [policy]
+  }
 }

 resource "aws_s3control_access_point_policy" "test" {
diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go
index afd9a20cb661..a9f23aabac52 100644
--- a/internal/service/s3control/access_point_test.go
+++ b/internal/service/s3control/access_point_test.go
@@ -204,6 +204,14 @@ func TestAccS3ControlAccessPoint_policy(t *testing.T) {
 					testAccCheckAccessPointHasPolicy(resourceName, expectedPolicyText2),
 				),
 			},
+			{
+				Config: testAccAccessPointConfig_noPolicy(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAccessPointExists(resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"),
+					resource.TestCheckResourceAttr(resourceName, "policy", ""),
+				),
+			},
 		},
 	})
 }
@@ -550,6 +558,26 @@ data "aws_iam_policy_document" "test" {
 `, rName)
 }

+func testAccAccessPointConfig_noPolicy(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_s3_bucket" "test" {
+  bucket = %[1]q
+}
+
+resource "aws_s3_access_point" "test" {
+  bucket = aws_s3_bucket.test.bucket
+  name   = %[1]q
+
+  public_access_block_configuration {
+    block_public_acls       = true
+    block_public_policy     = false
+    ignore_public_acls      = true
+    restrict_public_buckets = false
+  }
+}
+`, rName)
+}
+
 func testAccAccessPointConfig_publicAccessBlock(rName string) string {
 	return fmt.Sprintf(`
 resource "aws_s3_bucket" "test" {
diff --git a/website/docs/r/s3control_access_point_policy.html.markdown b/website/docs/r/s3control_access_point_policy.html.markdown
index a1d3cb77e241..f921b22f6efa 100644
--- a/website/docs/r/s3control_access_point_policy.html.markdown
+++ b/website/docs/r/s3control_access_point_policy.html.markdown
@@ -29,6 +29,10 @@ resource "aws_s3_access_point" "example" {
     ignore_public_acls      = true
     restrict_public_buckets = false
   }
+
+  lifecycle {
+    ignore_changes = [policy]
+  }
 }

 resource "aws_s3control_access_point_policy" "example" {

From 5f6cafd3e526ba828a24e932918f9ac1b51648c9 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 11:26:52 -0500
Subject: [PATCH 202/304] r/aws_s3_access_point: Add
 'FindAccessPointByAccountIDAndName'.

Acceptance test output:

% make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_ -timeout 180m
=== RUN   TestAccS3ControlAccessPoint_basic
=== PAUSE TestAccS3ControlAccessPoint_basic
=== RUN   TestAccS3ControlAccessPoint_disappears
=== PAUSE TestAccS3ControlAccessPoint_disappears
=== RUN   TestAccS3ControlAccessPoint_Bucket_arn
=== PAUSE TestAccS3ControlAccessPoint_Bucket_arn
=== RUN   TestAccS3ControlAccessPoint_policy
=== PAUSE TestAccS3ControlAccessPoint_policy
=== RUN   TestAccS3ControlAccessPoint_publicAccessBlock
=== PAUSE TestAccS3ControlAccessPoint_publicAccessBlock
=== RUN   TestAccS3ControlAccessPoint_vpc
=== PAUSE TestAccS3ControlAccessPoint_vpc
=== CONT  TestAccS3ControlAccessPoint_basic
=== CONT  TestAccS3ControlAccessPoint_publicAccessBlock
=== CONT  TestAccS3ControlAccessPoint_disappears
=== CONT  TestAccS3ControlAccessPoint_vpc
=== CONT  TestAccS3ControlAccessPoint_policy
=== CONT  TestAccS3ControlAccessPoint_Bucket_arn
    acctest.go:1250: skipping since no Outposts found
--- SKIP: TestAccS3ControlAccessPoint_Bucket_arn (1.40s)
--- PASS: TestAccS3ControlAccessPoint_disappears (28.06s)
--- PASS: TestAccS3ControlAccessPoint_basic (30.38s)
--- PASS: TestAccS3ControlAccessPoint_publicAccessBlock (30.51s)
--- PASS: TestAccS3ControlAccessPoint_vpc (30.55s)
--- PASS: TestAccS3ControlAccessPoint_policy (78.29s)
PASS
ok      github.com/hashicorp/terraform-provider-aws/internal/service/s3control 81.874s
---
 internal/service/s3control/access_point.go    | 68 +++++++------------
 .../service/s3control/access_point_test.go    | 59 +++++-----------
 internal/service/s3control/find.go            | 26 +++++++
 3 files changed, 66 insertions(+), 87 deletions(-)

diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go
index ed130b19b8e8..33be775e8c1d 100644
--- a/internal/service/s3control/access_point.go
+++ b/internal/service/s3control/access_point.go
@@ -12,6 +12,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
 )

@@ -178,17 +179,17 @@ func resourceAccessPointCreate(d *schema.ResourceData, meta interface{}) error {
 func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*conns.AWSClient).S3ControlConn

-	accountId, name, err := AccessPointParseResourceID(d.Id())
+	accountID, name, err := AccessPointParseResourceID(d.Id())
+
 	if err != nil {
 		return err
 	}

-	output, err := conn.GetAccessPoint(&s3control.GetAccessPointInput{
-		AccountId: aws.String(accountId),
-		Name:      aws.String(name),
-	})
+	s3OnOutposts := arn.IsARN(name)

-	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) {
+	output, err := FindAccessPointByAccountIDAndName(conn, accountID, name)
+
+	if !d.IsNewResource() && tfresource.NotFound(err) {
 		log.Printf("[WARN] S3 Access Point (%s) not found, removing from state", d.Id())
 		d.SetId("")
 		return nil
@@ -198,10 +199,6 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("error reading S3 Access Point (%s): %w", d.Id(), err)
 	}

-	if output == nil {
-		return fmt.Errorf("error reading S3 Access Point (%s): empty response", d.Id())
-	}
-
 	if strings.HasPrefix(name, "arn:") {
 		parsedAccessPointARN, err := arn.Parse(name)

@@ -209,6 +206,7 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error {
 			return fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err)
 		}

+		// https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3onoutposts.html#amazons3onoutposts-resources-for-iam-policies.
 		bucketARN := arn.ARN{
 			AccountID: parsedAccessPointARN.AccountID,
 			Partition: parsedAccessPointARN.Partition,
@@ -225,20 +223,21 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error {
 		d.Set("arn", name)
 		d.Set("bucket", bucketARN.String())
 	} else {
+		// https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-resources-for-iam-policies.
 		accessPointARN := arn.ARN{
-			AccountID: accountId,
 			Partition: meta.(*conns.AWSClient).Partition,
+			Service:   "s3",
 			Region:    meta.(*conns.AWSClient).Region,
+			AccountID: accountID,
 			Resource:  fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)),
-			Service:   "s3",
 		}

 		d.Set("arn", accessPointARN.String())
 		d.Set("bucket", output.Bucket)
 	}

-	d.Set("account_id", accountId)
-	d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId)))
+	d.Set("account_id", accountID)
+	d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountID)))
 	d.Set("name", output.Name)
 	d.Set("network_origin", output.NetworkOrigin)
 	if err := d.Set("public_access_block_configuration", flattenS3AccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil {
@@ -248,41 +247,20 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("error setting vpc_configuration: %s", err)
 	}

-	policyOutput, err := conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{
-		AccountId: aws.String(accountId),
-		Name:      aws.String(name),
-	})
+	policy, status, err := FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name)

-	if tfawserr.ErrMessageContains(err, "NoSuchAccessPointPolicy", "") {
-		d.Set("policy", "")
-	} else {
-		if err != nil {
-			return fmt.Errorf("error reading S3 Access Point (%s) policy: %s", d.Id(), err)
+	if err == nil {
+		if s3OnOutposts {
+			d.Set("has_public_access_policy", false)
+		} else {
+			d.Set("has_public_access_policy", status.IsPublic)
 		}
-
-		d.Set("policy", policyOutput.Policy)
-	}
-
-	// Return early since S3 on Outposts cannot have public policies
-	if strings.HasPrefix(name, "arn:") {
-		d.Set("has_public_access_policy", false)
-
-		return nil
-	}
-
-	policyStatusOutput, err := conn.GetAccessPointPolicyStatus(&s3control.GetAccessPointPolicyStatusInput{
-		AccountId: aws.String(accountId),
-		Name:      aws.String(name),
-	})
-
-	if tfawserr.ErrMessageContains(err, "NoSuchAccessPointPolicy", "") {
+		d.Set("policy", policy)
+	} else if tfresource.NotFound(err) {
 		d.Set("has_public_access_policy", false)
+		d.Set("policy", nil)
 	} else {
-		if err != nil {
-			return fmt.Errorf("error reading S3 Access Point (%s) policy status: %s", d.Id(), err)
-		}
-
-		d.Set("has_public_access_policy", policyStatusOutput.PolicyStatus.IsPublic)
+		return fmt.Errorf("error reading S3 Access Point (%s) policy: %w", d.Id(), err)
 	}

 	return nil
diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go
index a9f23aabac52..3145972777ed 100644
--- a/internal/service/s3control/access_point_test.go
+++ b/internal/service/s3control/access_point_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err) } + // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3onoutposts.html#amazons3onoutposts-resources-for-iam-policies. bucketARN := arn.ARN{ AccountID: parsedAccessPointARN.AccountID, Partition: parsedAccessPointARN.Partition, @@ -225,20 +223,21 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", name) d.Set("bucket", bucketARN.String()) } else { + // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-resources-for-iam-policies. accessPointARN := arn.ARN{ - AccountID: accountId, Partition: meta.(*conns.AWSClient).Partition, + Service: "s3", Region: meta.(*conns.AWSClient).Region, + AccountID: accountID, Resource: fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), - Service: "s3", } d.Set("arn", accessPointARN.String()) d.Set("bucket", output.Bucket) } - d.Set("account_id", accountId) - d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountId))) + d.Set("account_id", accountID) + d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountID))) d.Set("name", output.Name) d.Set("network_origin", output.NetworkOrigin) if err := d.Set("public_access_block_configuration", flattenS3AccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { @@ -248,41 +247,20 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting vpc_configuration: %s", err) } - policyOutput, err := conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + policy, status, err := FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name) - if tfawserr.ErrMessageContains(err, "NoSuchAccessPointPolicy", "") { - d.Set("policy", "") - } else { - if err != nil { - return fmt.Errorf("error reading S3 Access Point (%s) policy: %s", d.Id(), err) + if err == nil { + if s3OnOutposts { + d.Set("has_public_access_policy", false) + } else { + d.Set("has_public_access_policy", status.IsPublic) } - - d.Set("policy", policyOutput.Policy) - } - - // Return early since S3 on Outposts cannot have public policies - if strings.HasPrefix(name, "arn:") { - d.Set("has_public_access_policy", false) - - return nil - } - - policyStatusOutput, err := conn.GetAccessPointPolicyStatus(&s3control.GetAccessPointPolicyStatusInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - - if tfawserr.ErrMessageContains(err, "NoSuchAccessPointPolicy", "") { + d.Set("policy", policy) + } else if tfresource.NotFound(err) { d.Set("has_public_access_policy", false) + d.Set("policy", nil) } else { - if err != nil { - return fmt.Errorf("error reading S3 Access Point (%s) policy status: %s", d.Id(), err) - } - - d.Set("has_public_access_policy", policyStatusOutput.PolicyStatus.IsPublic) + return fmt.Errorf("error reading S3 Access Point (%s) policy: %w", d.Id(), err) } return nil diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go index a9f23aabac52..3145972777ed 100644 --- a/internal/service/s3control/access_point_test.go +++ b/internal/service/s3control/access_point_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" awspolicy "github.com/jen20/awspolicyequivalence" ) @@ -73,7 +74,7 @@ func TestAccS3ControlAccessPoint_disappears(t *testing.T) { Config: testAccAccessPointConfig_basic(bucketName, accessPointName), Check: resource.ComposeTestCheckFunc( testAccCheckAccessPointExists(resourceName, &v), - testAccCheckAccessPointDisappears(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3control.ResourceAccessPoint(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -296,61 +297,36 @@ func TestAccS3ControlAccessPoint_vpc(t *testing.T) { }) } -func testAccCheckAccessPointDisappears(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No S3 Access Point ID is set") - } +func testAccCheckAccessPointDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) - if err != nil { - return err + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_access_point" { + continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + accountID, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) - _, err = conn.DeleteAccessPoint(&s3control.DeleteAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) if err != nil { return err } - return nil - } -} - -func testAccCheckAccessPointDestroy(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn + _, err = tfs3control.FindAccessPointByAccountIDAndName(conn, accountID, name) - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_access_point" { + if tfresource.NotFound(err) { continue } - accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) if err != nil { return err } - _, err = conn.GetAccessPoint(&s3control.GetAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) - if err == nil { - return fmt.Errorf("S3 Access Point still exists") - } + return fmt.Errorf("S3 Access Point %s still exists", rs.Primary.ID) } return nil } -func testAccCheckAccessPointExists(n string, output *s3control.GetAccessPointOutput) resource.TestCheckFunc { +func testAccCheckAccessPointExists(n string, v *s3control.GetAccessPointOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -361,22 +337,21 @@ func testAccCheckAccessPointExists(n string, output *s3control.GetAccessPointOut return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) + accountID, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) + if err != nil { return err } conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - resp, err := conn.GetAccessPoint(&s3control.GetAccessPointInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + output, err := tfs3control.FindAccessPointByAccountIDAndName(conn, accountID, name) + if err != nil { return err } - *output = *resp + *v = *output return nil } diff --git a/internal/service/s3control/find.go 
b/internal/service/s3control/find.go index ec6926c969ad..61f7a9dfe095 100644 --- a/internal/service/s3control/find.go +++ b/internal/service/s3control/find.go @@ -26,6 +26,32 @@ func findPublicAccessBlockConfiguration(conn *s3control.S3Control, accountID str return output.PublicAccessBlockConfiguration, nil } +func FindAccessPointByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (*s3control.GetAccessPointOutput, error) { + input := &s3control.GetAccessPointInput{ + AccountId: aws.String(accountID), + Name: aws.String(name), + } + + output, err := conn.GetAccessPoint(input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func FindAccessPointPolicyAndStatusByAccountIDAndName(conn *s3control.S3Control, accountID string, name string) (string, *s3control.PolicyStatus, error) { input1 := &s3control.GetAccessPointPolicyInput{ AccountId: aws.String(accountID), From 17cd03e6d3f6a08dee33c817b9edc595827bf173 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 11:52:29 -0500 Subject: [PATCH 203/304] r/aws_s3_access_point: Use 'AccessPointCreateResourceID'. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_ -timeout 180m === RUN TestAccS3ControlAccessPoint_basic === PAUSE TestAccS3ControlAccessPoint_basic === RUN TestAccS3ControlAccessPoint_disappears === PAUSE TestAccS3ControlAccessPoint_disappears === RUN TestAccS3ControlAccessPoint_Bucket_arn === PAUSE TestAccS3ControlAccessPoint_Bucket_arn === RUN TestAccS3ControlAccessPoint_policy === PAUSE TestAccS3ControlAccessPoint_policy === RUN TestAccS3ControlAccessPoint_publicAccessBlock === PAUSE TestAccS3ControlAccessPoint_publicAccessBlock === RUN TestAccS3ControlAccessPoint_vpc === PAUSE TestAccS3ControlAccessPoint_vpc === CONT TestAccS3ControlAccessPoint_basic === CONT TestAccS3ControlAccessPoint_publicAccessBlock === CONT TestAccS3ControlAccessPoint_Bucket_arn === CONT TestAccS3ControlAccessPoint_disappears === CONT TestAccS3ControlAccessPoint_vpc === CONT TestAccS3ControlAccessPoint_policy === CONT TestAccS3ControlAccessPoint_Bucket_arn acctest.go:1250: skipping since no Outposts found --- SKIP: TestAccS3ControlAccessPoint_Bucket_arn (1.52s) --- PASS: TestAccS3ControlAccessPoint_disappears (28.41s) --- PASS: TestAccS3ControlAccessPoint_basic (32.08s) --- PASS: TestAccS3ControlAccessPoint_publicAccessBlock (32.11s) --- PASS: TestAccS3ControlAccessPoint_vpc (32.46s) --- PASS: TestAccS3ControlAccessPoint_policy (81.88s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 85.830s --- internal/service/s3control/access_point.go | 157 ++++++++++----------- 1 file changed, 78 insertions(+), 79 deletions(-) diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go index 33be775e8c1d..7e5e4000fdd2 100644 --- a/internal/service/s3control/access_point.go +++ b/internal/service/s3control/access_point.go @@ -126,50 +126,59 @@ func ResourceAccessPoint() *schema.Resource { func resourceAccessPointCreate(d *schema.ResourceData, meta interface{}) error { conn := 
meta.(*conns.AWSClient).S3ControlConn - accountId := meta.(*conns.AWSClient).AccountID + accountID := meta.(*conns.AWSClient).AccountID if v, ok := d.GetOk("account_id"); ok { - accountId = v.(string) + accountID = v.(string) } name := d.Get("name").(string) input := &s3control.CreateAccessPointInput{ - AccountId: aws.String(accountId), - Bucket: aws.String(d.Get("bucket").(string)), - Name: aws.String(name), - PublicAccessBlockConfiguration: expandS3AccessPointPublicAccessBlockConfiguration(d.Get("public_access_block_configuration").([]interface{})), - VpcConfiguration: expandS3AccessPointVpcConfiguration(d.Get("vpc_configuration").([]interface{})), + AccountId: aws.String(accountID), + Bucket: aws.String(d.Get("bucket").(string)), + Name: aws.String(name), + } + + if v, ok := d.GetOk("public_access_block_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.PublicAccessBlockConfiguration = expandPublicAccessBlockConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + + if v, ok := d.GetOk("vpc_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.VpcConfiguration = expandVpcConfiguration(v.([]interface{})[0].(map[string]interface{})) } log.Printf("[DEBUG] Creating S3 Access Point: %s", input) output, err := conn.CreateAccessPoint(input) if err != nil { - return fmt.Errorf("error creating S3 Control Access Point (%s): %w", name, err) + return fmt.Errorf("error creating S3 Access Point (%s): %w", name, err) } - if output == nil { - return fmt.Errorf("error creating S3 Control Access Point (%s): empty response", name) + resourceID, err := AccessPointCreateResourceID(aws.StringValue(output.AccessPointArn)) + + if err != nil { + return err } - parsedARN, err := arn.Parse(aws.StringValue(output.AccessPointArn)) + accountID, name, err = AccessPointParseResourceID(resourceID) - if err == nil && strings.HasPrefix(parsedARN.Resource, "outpost/") { - d.SetId(aws.StringValue(output.AccessPointArn)) - name = aws.StringValue(output.AccessPointArn) - } else { - d.SetId(fmt.Sprintf("%s:%s", accountId, name)) + if err != nil { + return err } + d.SetId(resourceID) + if v, ok := d.GetOk("policy"); ok { - log.Printf("[DEBUG] Putting S3 Access Point policy: %s", d.Id()) - _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ - AccountId: aws.String(accountId), + input := &s3control.PutAccessPointPolicyInput{ + AccountId: aws.String(accountID), Name: aws.String(name), Policy: aws.String(v.(string)), - }) + } + + log.Printf("[DEBUG] Creating S3 Access Point policy: %s", input) + _, err = conn.PutAccessPointPolicy(input) if err != nil { - return fmt.Errorf("error putting S3 Access Point (%s) policy: %s", d.Id(), err) + return fmt.Errorf("error creating S3 Access Point (%s) policy: %w", d.Id(), err) } } @@ -199,25 +208,25 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error reading S3 Access Point (%s): %w", d.Id(), err) } - if strings.HasPrefix(name, "arn:") { - parsedAccessPointARN, err := arn.Parse(name) + if s3OnOutposts { + accessPointARN, err := arn.Parse(name) if err != nil { - return fmt.Errorf("error parsing S3 Control Access Point ARN (%s): %w", name, err) + return err } // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3onoutposts.html#amazons3onoutposts-resources-for-iam-policies. 
bucketARN := arn.ARN{ - AccountID: parsedAccessPointARN.AccountID, - Partition: parsedAccessPointARN.Partition, - Region: parsedAccessPointARN.Region, + Partition: accessPointARN.Partition, + Service: accessPointARN.Service, + Region: accessPointARN.Region, + AccountID: accessPointARN.AccountID, Resource: strings.Replace( - parsedAccessPointARN.Resource, + accessPointARN.Resource, fmt.Sprintf("accesspoint/%s", aws.StringValue(output.Name)), fmt.Sprintf("bucket/%s", aws.StringValue(output.Bucket)), 1, ), - Service: parsedAccessPointARN.Service, } d.Set("arn", name) @@ -240,11 +249,19 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountID))) d.Set("name", output.Name) d.Set("network_origin", output.NetworkOrigin) - if err := d.Set("public_access_block_configuration", flattenS3AccessPointPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)); err != nil { - return fmt.Errorf("error setting public_access_block_configuration: %s", err) + if output.PublicAccessBlockConfiguration != nil { + if err := d.Set("public_access_block_configuration", []interface{}{flattenPublicAccessBlockConfiguration(output.PublicAccessBlockConfiguration)}); err != nil { + return fmt.Errorf("error setting public_access_block_configuration: %w", err) + } + } else { + d.Set("public_access_block_configuration", nil) } - if err := d.Set("vpc_configuration", flattenS3AccessPointVpcConfiguration(output.VpcConfiguration)); err != nil { - return fmt.Errorf("error setting vpc_configuration: %s", err) + if output.VpcConfiguration != nil { + if err := d.Set("vpc_configuration", []interface{}{flattenVpcConfiguration(output.VpcConfiguration)}); err != nil { + return fmt.Errorf("error setting vpc_configuration: %w", err) + } + } else { + d.Set("vpc_configuration", nil) } policy, status, err := FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name) @@ -269,32 +286,35 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { func resourceAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).S3ControlConn - accountId, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := AccessPointParseResourceID(d.Id()) + if err != nil { return err } if d.HasChange("policy") { if v, ok := d.GetOk("policy"); ok { - log.Printf("[DEBUG] Putting S3 Access Point policy: %s", d.Id()) - _, err := conn.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{ - AccountId: aws.String(accountId), + input := &s3control.PutAccessPointPolicyInput{ + AccountId: aws.String(accountID), Name: aws.String(name), Policy: aws.String(v.(string)), - }) + } + + log.Printf("[DEBUG] Updating S3 Access Point policy: %s", input) + _, err := conn.PutAccessPointPolicy(input) if err != nil { - return fmt.Errorf("error putting S3 Access Point (%s) policy: %s", d.Id(), err) + return fmt.Errorf("error updating S3 Access Point (%s) policy: %w", d.Id(), err) } } else { log.Printf("[DEBUG] Deleting S3 Access Point policy: %s", d.Id()) _, err := conn.DeleteAccessPointPolicy(&s3control.DeleteAccessPointPolicyInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), Name: aws.String(name), }) if err != nil { - return fmt.Errorf("error deleting S3 Access Point (%s) policy: %s", d.Id(), err) + return fmt.Errorf("error deleting S3 Access Point (%s) policy: %w", d.Id(), err) } } } @@ 
-305,23 +325,24 @@ func resourceAccessPointUpdate(d *schema.ResourceData, meta interface{}) error { func resourceAccessPointDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).S3ControlConn - accountId, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := AccessPointParseResourceID(d.Id()) + if err != nil { return err } log.Printf("[DEBUG] Deleting S3 Access Point: %s", d.Id()) _, err = conn.DeleteAccessPoint(&s3control.DeleteAccessPointInput{ - AccountId: aws.String(accountId), + AccountId: aws.String(accountID), Name: aws.String(name), }) - if tfawserr.ErrMessageContains(err, "NoSuchAccessPoint", "") { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { return nil } if err != nil { - return fmt.Errorf("error deleting S3 Access Point (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting S3 Access Point (%s): %w", d.Id(), err) } return nil @@ -370,52 +391,30 @@ func AccessPointParseResourceID(id string) (string, string, error) { return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected account-id%[2]saccess-point-name", id, accessPointResourceIDSeparator) } -func expandS3AccessPointVpcConfiguration(vConfig []interface{}) *s3control.VpcConfiguration { - if len(vConfig) == 0 || vConfig[0] == nil { +func expandVpcConfiguration(tfMap map[string]interface{}) *s3control.VpcConfiguration { + if tfMap == nil { return nil } - mConfig := vConfig[0].(map[string]interface{}) - - return &s3control.VpcConfiguration{ - VpcId: aws.String(mConfig["vpc_id"].(string)), - } -} + apiObject := &s3control.VpcConfiguration{} -func flattenS3AccessPointVpcConfiguration(config *s3control.VpcConfiguration) []interface{} { - if config == nil { - return []interface{}{} + if v, ok := tfMap["vpc_id"].(string); ok { + apiObject.VpcId = aws.String(v) } - return []interface{}{map[string]interface{}{ - "vpc_id": aws.StringValue(config.VpcId), - }} + return apiObject } -func expandS3AccessPointPublicAccessBlockConfiguration(vConfig []interface{}) *s3control.PublicAccessBlockConfiguration { - if len(vConfig) == 0 || vConfig[0] == nil { +func flattenVpcConfiguration(apiObject *s3control.VpcConfiguration) map[string]interface{} { + if apiObject == nil { return nil } - mConfig := vConfig[0].(map[string]interface{}) - - return &s3control.PublicAccessBlockConfiguration{ - BlockPublicAcls: aws.Bool(mConfig["block_public_acls"].(bool)), - BlockPublicPolicy: aws.Bool(mConfig["block_public_policy"].(bool)), - IgnorePublicAcls: aws.Bool(mConfig["ignore_public_acls"].(bool)), - RestrictPublicBuckets: aws.Bool(mConfig["restrict_public_buckets"].(bool)), - } -} + tfMap := map[string]interface{}{} -func flattenS3AccessPointPublicAccessBlockConfiguration(config *s3control.PublicAccessBlockConfiguration) []interface{} { - if config == nil { - return []interface{}{} + if v := apiObject.VpcId; v != nil { + tfMap["vpc_id"] = aws.StringValue(v) } - return []interface{}{map[string]interface{}{ - "block_public_acls": aws.BoolValue(config.BlockPublicAcls), - "block_public_policy": aws.BoolValue(config.BlockPublicPolicy), - "ignore_public_acls": aws.BoolValue(config.IgnorePublicAcls), - "restrict_public_buckets": aws.BoolValue(config.RestrictPublicBuckets), - }} + return tfMap } From 7baaa936d982da645271c1bb45b784d705c7ac2e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 12:00:16 -0500 Subject: [PATCH 204/304] r/aws_s3_access_point: 'vpc_configuration.vpc_id' is ForceNew. 
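The resource's Update function only handles the `policy` argument, and the S3 Control API offers no way to modify an access point's VPC configuration once it is created, so a changed `vpc_id` could never be applied in place; marking the argument ForceNew makes Terraform plan a replacement instead. A minimal sketch of a configuration affected by the change (resource names are illustrative):

```hcl
resource "aws_s3_access_point" "example" {
  bucket = aws_s3_bucket.example.bucket
  name   = "example"

  vpc_configuration {
    # Pointing this at a different VPC now forces a new access point
    # instead of an in-place update that the API cannot perform.
    vpc_id = aws_vpc.example.id
  }
}
```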
--- .changelog/19294.txt | 4 ++++ internal/service/s3control/access_point.go | 1 + 2 files changed, 5 insertions(+) diff --git a/.changelog/19294.txt b/.changelog/19294.txt index 13a8132449ff..efacf55f02f4 100644 --- a/.changelog/19294.txt +++ b/.changelog/19294.txt @@ -9,3 +9,7 @@ aws_s3control_object_lambda_access_point_policy ```release-note:new-resource aws_s3control_access_point_policy ``` + +```release-note:bug +aws_s3_access_point: `vpc_configuration.vpc_id` is _ForceNew_ +``` \ No newline at end of file diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go index 7e5e4000fdd2..45ab88839fbf 100644 --- a/internal/service/s3control/access_point.go +++ b/internal/service/s3control/access_point.go @@ -115,6 +115,7 @@ func ResourceAccessPoint() *schema.Resource { "vpc_id": { Type: schema.TypeString, Required: true, + ForceNew: true, }, }, }, From cc9585349c4d06ef2b2640c91135762ce513c194 Mon Sep 17 00:00:00 2001 From: Alvin Wong Date: Tue, 16 Nov 2021 12:46:19 -0500 Subject: [PATCH 205/304] bump config timeouts to 10 minutes --- internal/service/route53resolver/wait.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/route53resolver/wait.go b/internal/service/route53resolver/wait.go index 66c707d446dd..35159dce70e2 100644 --- a/internal/service/route53resolver/wait.go +++ b/internal/service/route53resolver/wait.go @@ -23,10 +23,10 @@ const ( QueryLogConfigDeletedTimeout = 5 * time.Minute // Maximum amount of time to wait for a DnssecConfig to return ENABLED - DNSSECConfigCreatedTimeout = 5 * time.Minute + DNSSECConfigCreatedTimeout = 10 * time.Minute // Maximum amount of time to wait for a DnssecConfig to return DISABLED - DNSSECConfigDeletedTimeout = 5 * time.Minute + DNSSECConfigDeletedTimeout = 10 * time.Minute // Maximum amount of time to wait for a FirewallDomainList to be updated FirewallDomainListUpdatedTimeout = 5 * time.Minute From 68d9efca32ef6f3edfad295b12272ff5ee8cbf9e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 13:00:28 -0500 Subject: [PATCH 206/304] r/aws_s3_access_point: Add 'alias' attribute. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... 
-v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_basic -timeout 180m === RUN TestAccS3ControlAccessPoint_basic === PAUSE TestAccS3ControlAccessPoint_basic === CONT TestAccS3ControlAccessPoint_basic --- PASS: TestAccS3ControlAccessPoint_basic (28.71s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 32.316s --- .changelog/19294.txt | 4 ++++ internal/service/s3control/access_point.go | 5 +++++ internal/service/s3control/access_point_test.go | 2 ++ website/docs/r/s3_access_point.html.markdown | 1 + 4 files changed, 12 insertions(+) diff --git a/.changelog/19294.txt b/.changelog/19294.txt index efacf55f02f4..a1e2e3ebe7ff 100644 --- a/.changelog/19294.txt +++ b/.changelog/19294.txt @@ -12,4 +12,8 @@ aws_s3control_access_point_policy ```release-note:bug aws_s3_access_point: `vpc_configuration.vpc_id` is _ForceNew_ +``` + +```release-note:enhancement +aws_s3_access_point: Add `alias` attribute ``` \ No newline at end of file diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go index 45ab88839fbf..453ac3a40766 100644 --- a/internal/service/s3control/access_point.go +++ b/internal/service/s3control/access_point.go @@ -35,6 +35,10 @@ func ResourceAccessPoint() *schema.Resource { ForceNew: true, ValidateFunc: verify.ValidAccountID, }, + "alias": { + Type: schema.TypeString, + Computed: true, + }, "arn": { Type: schema.TypeString, Computed: true, @@ -247,6 +251,7 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { } d.Set("account_id", accountID) + d.Set("alias", output.Alias) d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountID))) d.Set("name", output.Name) d.Set("network_origin", output.NetworkOrigin) diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go index 3145972777ed..652a0f7b5d14 100644 --- a/internal/service/s3control/access_point_test.go +++ b/internal/service/s3control/access_point_test.go @@ -34,6 +34,8 @@ func TestAccS3ControlAccessPoint_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAccessPointExists(resourceName, &v), acctest.CheckResourceAttrAccountID(resourceName, "account_id"), + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points-alias.html: + resource.TestMatchResourceAttr(resourceName, "alias", regexp.MustCompile(`^.*-s3alias$`)), acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", accessPointName)), resource.TestCheckResourceAttr(resourceName, "bucket", bucketName), acctest.MatchResourceAttrRegionalHostname(resourceName, "domain_name", "s3-accesspoint", regexp.MustCompile(fmt.Sprintf("^%s-\\d{12}", accessPointName))), diff --git a/website/docs/r/s3_access_point.html.markdown b/website/docs/r/s3_access_point.html.markdown index 18207e3d0ca3..b2049c6226de 100644 --- a/website/docs/r/s3_access_point.html.markdown +++ b/website/docs/r/s3_access_point.html.markdown @@ -90,6 +90,7 @@ The following arguments are required: In addition to all arguments above, the following attributes are exported: +* `alias` - The alias of the S3 Access Point. * `arn` - Amazon Resource Name (ARN) of the S3 Access Point. * `domain_name` - The DNS domain name of the S3 Access Point in the format _`name`_-_`account_id`_.s3-accesspoint._region_.amazonaws.com. Note: S3 access points only support secure access by HTTPS. HTTP isn't supported. 
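Note: the new `alias` attribute is worth exporting because AWS accepts an access point alias in place of a bucket name in object-level (data-plane) S3 operations; as the test regex above reflects, aliases always end in `-s3alias`. A minimal sketch of consuming the attribute, assuming an `aws_s3_access_point.example` resource like the one in the resource documentation:

```hcl
output "access_point_alias" {
  # The alias can stand in for a bucket name in object-level calls,
  # e.g. `aws s3api get-object --bucket <alias> --key example.txt example.txt`.
  value = aws_s3_access_point.example.alias
}
```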
From 8080e13b6fa217a7a773a6a9a167ab1804297537 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 13:15:10 -0500 Subject: [PATCH 207/304] r/aws_s3_access_point: Add 'endpoints' attribute. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_basic -timeout 180m === RUN TestAccS3ControlAccessPoint_basic === PAUSE TestAccS3ControlAccessPoint_basic === CONT TestAccS3ControlAccessPoint_basic --- PASS: TestAccS3ControlAccessPoint_basic (27.17s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 30.729s --- .changelog/19294.txt | 4 ++++ internal/service/s3control/access_point.go | 6 ++++++ internal/service/s3control/access_point_test.go | 1 + website/docs/r/s3_access_point.html.markdown | 1 + 4 files changed, 12 insertions(+) diff --git a/.changelog/19294.txt b/.changelog/19294.txt index a1e2e3ebe7ff..2a9fef4fdab6 100644 --- a/.changelog/19294.txt +++ b/.changelog/19294.txt @@ -16,4 +16,8 @@ aws_s3_access_point: `vpc_configuration.vpc_id` is _ForceNew_ ```release-note:enhancement aws_s3_access_point: Add `alias` attribute +``` + +```release-note:enhancement +aws_s3_access_point: Add `endpoints` attribute ``` \ No newline at end of file diff --git a/internal/service/s3control/access_point.go b/internal/service/s3control/access_point.go index 453ac3a40766..2b47f0fbd77a 100644 --- a/internal/service/s3control/access_point.go +++ b/internal/service/s3control/access_point.go @@ -53,6 +53,11 @@ func ResourceAccessPoint() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "endpoints": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "has_public_access_policy": { Type: schema.TypeBool, Computed: true, @@ -253,6 +258,7 @@ func resourceAccessPointRead(d *schema.ResourceData, meta interface{}) error { d.Set("account_id", accountID) d.Set("alias", output.Alias) d.Set("domain_name", meta.(*conns.AWSClient).RegionalHostname(fmt.Sprintf("%s-%s.s3-accesspoint", aws.StringValue(output.Name), accountID))) + d.Set("endpoints", aws.StringValueMap(output.Endpoints)) d.Set("name", output.Name) d.Set("network_origin", output.NetworkOrigin) if output.PublicAccessBlockConfiguration != nil { diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go index 652a0f7b5d14..3787c06bb240 100644 --- a/internal/service/s3control/access_point_test.go +++ b/internal/service/s3control/access_point_test.go @@ -39,6 +39,7 @@ func TestAccS3ControlAccessPoint_basic(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "s3", fmt.Sprintf("accesspoint/%s", accessPointName)), resource.TestCheckResourceAttr(resourceName, "bucket", bucketName), acctest.MatchResourceAttrRegionalHostname(resourceName, "domain_name", "s3-accesspoint", regexp.MustCompile(fmt.Sprintf("^%s-\\d{12}", accessPointName))), + resource.TestCheckResourceAttr(resourceName, "endpoints.%", "4"), resource.TestCheckResourceAttr(resourceName, "has_public_access_policy", "false"), resource.TestCheckResourceAttr(resourceName, "name", accessPointName), resource.TestCheckResourceAttr(resourceName, "network_origin", "Internet"), diff --git a/website/docs/r/s3_access_point.html.markdown b/website/docs/r/s3_access_point.html.markdown index b2049c6226de..97ddc69038b7 100644 --- 
a/website/docs/r/s3_access_point.html.markdown +++ b/website/docs/r/s3_access_point.html.markdown @@ -94,6 +94,7 @@ In addition to all arguments above, the following attributes are exported: * `arn` - Amazon Resource Name (ARN) of the S3 Access Point. * `domain_name` - The DNS domain name of the S3 Access Point in the format _`name`_-_`account_id`_.s3-accesspoint._region_.amazonaws.com. Note: S3 access points only support secure access by HTTPS. HTTP isn't supported. +* `endpoints` - The VPC endpoints for the S3 Access Point. * `has_public_access_policy` - Indicates whether this access point currently has a policy that allows public access. * `id` - For Access Point of an AWS Partition S3 Bucket, the AWS account ID and access point name separated by a colon (`:`). For S3 on Outposts Bucket, the Amazon Resource Name (ARN) of the Access Point. * `network_origin` - Indicates whether this access point allows access from the public Internet. Values are `VPC` (the access point doesn't allow access from the public Internet) and `Internet` (the access point allows access from the public Internet, subject to the access point and bucket access policies). From 332f720a87b3fd015d33ff7fe143b34e6f46fe13 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 13:22:28 -0500 Subject: [PATCH 208/304] r/aws_s3_access_point: Tidy 'testAccCheckAccessPointPolicyExists'. Acceptance test output: % make testacc PKG_NAME=internal/service/s3control TESTARGS='-run=TestAccS3ControlAccessPoint_policy' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3control/... -v -count 1 -parallel 20 -run=TestAccS3ControlAccessPoint_policy -timeout 180m === RUN TestAccS3ControlAccessPoint_policy === PAUSE TestAccS3ControlAccessPoint_policy === CONT TestAccS3ControlAccessPoint_policy --- PASS: TestAccS3ControlAccessPoint_policy (78.00s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3control 81.501s --- internal/service/s3control/access_point_test.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go index 3787c06bb240..17b288b4f863 100644 --- a/internal/service/s3control/access_point_test.go +++ b/internal/service/s3control/access_point_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -371,22 +370,20 @@ func testAccCheckAccessPointHasPolicy(n string, fn func() string) resource.TestC return fmt.Errorf("No S3 Access Point ID is set") } - accountId, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) + accountID, name, err := tfs3control.AccessPointParseResourceID(rs.Primary.ID) + if err != nil { return err } conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlConn - resp, err := conn.GetAccessPointPolicy(&s3control.GetAccessPointPolicyInput{ - AccountId: aws.String(accountId), - Name: aws.String(name), - }) + actualPolicyText, _, err := tfs3control.FindAccessPointPolicyAndStatusByAccountIDAndName(conn, accountID, name) + if err != nil { return err } - actualPolicyText := *resp.Policy expectedPolicyText := fn() equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) From f178361c16a0b9213d01bd47ca473dd8e40d29d9 Mon Sep 17 00:00:00 2001 From: changelogbot Date: 
Tue, 16 Nov 2021 18:37:21 +0000 Subject: [PATCH 209/304] Update CHANGELOG.md for #21782 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 210379908798..55e90be4a05c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,12 +3,15 @@ FEATURES: * **New Data Source:** `aws_emr_release_labels` ([#21767](https://github.com/hashicorp/terraform-provider-aws/issues/21767)) +* **New Resource:** `aws_appstream_directory_config` ([#21505](https://github.com/hashicorp/terraform-provider-aws/issues/21505)) * **New Resource:** `aws_s3control_multi_region_access_point` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) * **New Resource:** `aws_s3control_multi_region_access_point_policy` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) +* **New Resource:** `aws_securityhub_finding_aggregator` ([#21560](https://github.com/hashicorp/terraform-provider-aws/issues/21560)) ENHANCEMENTS: * resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702)) +* resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support resource tagging ([#21769](https://github.com/hashicorp/terraform-provider-aws/issues/21769)) * resource/aws_neptune_cluster: Support in-place update of `engine_version` ([#21760](https://github.com/hashicorp/terraform-provider-aws/issues/21760)) * resource/aws_sagemaker_endpoint: Add `deployment_config` argument ([#21765](https://github.com/hashicorp/terraform-provider-aws/issues/21765)) From a446b8dd665457c82312f954fe6126434160686c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Nov 2021 18:47:55 +0000 Subject: [PATCH 210/304] build(deps): bump github.com/aws/aws-sdk-go in /providerlint Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.3 to 1.42.5. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.3...v1.42.5) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- providerlint/go.mod | 2 +- providerlint/go.sum | 4 +-- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 31 +++++++++++++++++++ .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- providerlint/vendor/modules.txt | 2 +- 5 files changed, 36 insertions(+), 5 deletions(-) diff --git a/providerlint/go.mod b/providerlint/go.mod index 015a28d4dd95..6a20afda22c7 100644 --- a/providerlint/go.mod +++ b/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/providerlint go 1.16 require ( - github.com/aws/aws-sdk-go v1.42.3 + github.com/aws/aws-sdk-go v1.42.5 github.com/bflad/tfproviderlint v0.27.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.8.0 golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb diff --git a/providerlint/go.sum b/providerlint/go.sum index f6851cc3a27b..c30e12c7345c 100644 --- a/providerlint/go.sum +++ b/providerlint/go.sum @@ -70,8 +70,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.42.3 h1:lBKr3tQ06m1uykiychMNKLK1bRfOzaIEQpsI/S3QiNc= -github.com/aws/aws-sdk-go v1.42.3/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.42.5 h1:0xNoQrGh9InmUsT+9qzZ8QLfBEUsnev5BMeED6t6cKI= +github.com/aws/aws-sdk-go v1.42.5/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.27.1 h1:sYlc6R8cQ0NtaCCA7Oh1ld8xfn0oiwn6mm4unooi2fo= diff --git a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index ea12126a7110..10b39cc5eaca 100644 --- a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -12675,6 +12675,31 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "mobileanalytics": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14201,6 +14226,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17502,6 +17530,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, diff --git a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index 27a961c1a9c6..92d4ab69914f 100644 --- a/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ 
b/providerlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.42.3" +const SDKVersion = "1.42.5" diff --git a/providerlint/vendor/modules.txt b/providerlint/vendor/modules.txt index 4ffb6327e4c4..1aeded448e33 100644 --- a/providerlint/vendor/modules.txt +++ b/providerlint/vendor/modules.txt @@ -14,7 +14,7 @@ github.com/agext/levenshtein github.com/apparentlymart/go-textseg/v12/textseg # github.com/apparentlymart/go-textseg/v13 v13.0.0 github.com/apparentlymart/go-textseg/v13/textseg -# github.com/aws/aws-sdk-go v1.42.3 +# github.com/aws/aws-sdk-go v1.42.5 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn From 2feb6ae63fe4e8d8e93188a70de92ceca865c044 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 14:04:25 -0500 Subject: [PATCH 211/304] Add CHANGELOG entry. --- .changelog/21797.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21797.txt diff --git a/.changelog/21797.txt b/.changelog/21797.txt new file mode 100644 index 000000000000..d8ff12c23d0d --- /dev/null +++ b/.changelog/21797.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_route53_resolver_dnssec_config: Increase resource creation and deletion timeouts to 10 minutes +``` \ No newline at end of file From 48c29148f8a3ef61ec5e8d3eb685816f82d99755 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Tue, 16 Nov 2021 19:09:13 +0000 Subject: [PATCH 212/304] Update CHANGELOG.md for #21794 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55e90be4a05c..b2d4663df01a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,12 +4,17 @@ FEATURES: * **New Data Source:** `aws_emr_release_labels` ([#21767](https://github.com/hashicorp/terraform-provider-aws/issues/21767)) * **New Resource:** `aws_appstream_directory_config` ([#21505](https://github.com/hashicorp/terraform-provider-aws/issues/21505)) +* **New Resource:** `aws_s3control_access_point_policy` ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) * **New Resource:** `aws_s3control_multi_region_access_point` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) * **New Resource:** `aws_s3control_multi_region_access_point_policy` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) +* **New Resource:** `aws_s3control_object_lambda_access_point` ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) +* **New Resource:** `aws_s3control_object_lambda_access_point_policy` ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) * **New Resource:** `aws_securityhub_finding_aggregator` ([#21560](https://github.com/hashicorp/terraform-provider-aws/issues/21560)) ENHANCEMENTS: +* aws_s3_access_point: Add `alias` attribute ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) +* aws_s3_access_point: Add `endpoints` attribute ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) * resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702)) * resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support resource tagging ([#21769](https://github.com/hashicorp/terraform-provider-aws/issues/21769)) * resource/aws_neptune_cluster: Support in-place update of `engine_version` 
([#21760](https://github.com/hashicorp/terraform-provider-aws/issues/21760)) @@ -17,6 +22,9 @@ ENHANCEMENTS: BUG FIXES: +* aws_s3_access_point: `vpc_configuration.vpc_id` is _ForceNew_ ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) +* resource/aws_cloudfront_cache_policy: Fix 0 values for `default_ttl`, `max_ttl` and `min_ttl` arguments ([#21793](https://github.com/hashicorp/terraform-provider-aws/issues/21793)) +* resource/aws_internet_gateway: Allow `available` as a *pending* state during gateway detach ([#21794](https://github.com/hashicorp/terraform-provider-aws/issues/21794)) * resource/aws_security_group: Fix lack of pagination when describing security groups ([#21743](https://github.com/hashicorp/terraform-provider-aws/issues/21743)) ## 3.65.0 (November 11, 2021) From 0c295faa0658a96e28ec4b1f03c96de6d20f4cb6 Mon Sep 17 00:00:00 2001 From: jhole89 Date: Mon, 24 Feb 2020 15:58:28 -0800 Subject: [PATCH 213/304] add new AwsIotThingGroup resource --- .../lambda/resource_aws_iot_thing_group.go | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 internal/service/lambda/resource_aws_iot_thing_group.go diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/lambda/resource_aws_iot_thing_group.go new file mode 100644 index 000000000000..dfb61dc027a2 --- /dev/null +++ b/internal/service/lambda/resource_aws_iot_thing_group.go @@ -0,0 +1,224 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "log" +) + +func resourceAwsIotThingGroup() *schema.Resource{ + return &schema.Resource{ + Create: resourceAwsIotThingGroupCreate, + Read: resourceAwsIotThingGroupRead, + Update: resourceAwsIotThingGroupUpdate, + Delete: resourceAwsIotThingGroupDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "parent_group_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "properties": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 2, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attributes": { + Type: schema.TypeMap, + Optional: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "tags": tagsSchema(), + "default_client_id": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsIotThingGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iotconn + input := &iot.CreateThingGroupInput{ + ThingGroupName: aws.String(d.Get("name").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().IotTags(), + } + + if v, ok := d.GetOk("parent_group_name"); ok { + input.ParentGroupName = aws.String(v.(string)) + } + if v, ok := d.GetOk("properties"); ok { + input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) + } + + log.Printf("[DEBUG] Creating IoT Thing Group: %s", input) + 
out, err := conn.CreateThingGroup(input) + if err != nil { + return err + } + + d.SetId(*out.ThingGroupName) + return resourceAwsIotThingGroupRead(d, meta) +} + +func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iotconn + + input := &iot.DescribeThingGroupInput{ + ThingGroupName: aws.String(d.Id()), + } + log.Printf("[DEBUG] Reading IoT Thing Group: %s", input) + out, err := conn.DescribeThingGroup(input) + + if err != nil { + if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] IoT Thing Group %q not found, removing from state", d.Id()) + d.SetId("") + } + return err + } + + log.Printf("[DEBUG] Received IoT Thing Group: %s", out) + + d.Set("arn", out.ThingGroupArn) + d.Set("name", out.ThingGroupName) + if err := d.Set("metadata", flattenIotThingGroupMetadata(out.ThingGroupMetadata)); err != nil { + return fmt.Errorf("error setting metadata #{err}") + } + if err := d.Set("properties", flattenIotThingGroupProperties(out.ThingGroupProperties)); err != nil { + return fmt.Errorf("error setting properties #{err}") + } + d.Set("version", out.Version) + + return nil +} + +func resourceAwsIotThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iotconn + + input := &iot.UpdateThingGroupInput{ + ThingGroupName: aws.String(d.Get("name").(string)), + } + + if d.HasChange("properties") { + if v, ok := d.GetOk("properties"); ok { + input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) + } + } + + _, err := conn.UpdateThingGroup(input) + if err != nil { + return err + } + + return resourceAwsIotThingGroupRead(d, meta) +} + +func resourceAwsIotThingGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iotconn + + input := &iot.DeleteThingGroupInput{ + ThingGroupName: aws.String(d.Id()), + } + log.Printf("[DEBUG] Deleting IoT Thing Group: %s", input) + + _, err := conn.DeleteThingGroup(input) + if err != nil { + if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + return nil + } + return err + } + + return nil +} + +func expandIotThingsGroupProperties(l []interface{}) *iot.ThingGroupProperties { + m := l[0].(map[string]interface{}) + + thingGroupProperties := &iot.ThingGroupProperties{} + + if v, ok := m["attributes"]; ok { + thingGroupProperties.AttributePayload = &iot.AttributePayload{ + Attributes: stringMapToPointers(v.(map[string]interface{})), + } + } + + if v, ok := m["description"]; ok { + thingGroupProperties.ThingGroupDescription = aws.String(v.(string)) + } + + return thingGroupProperties +} + +func flattenIotThingGroupProperties(properties *iot.ThingGroupProperties) []map[string]interface{} { + if properties == nil { + return []map[string]interface{}{} + } + + props := map[string]interface{}{ + "attributes": aws.StringValueMap(properties.AttributePayload.Attributes), + "description": aws.StringValue(properties.ThingGroupDescription), + } + + return []map[string]interface{}{props} +} + +func flattenIotThingGroupMetadata(metadata *iot.ThingGroupMetadata) []map[string]interface{} { + if metadata == nil { + return []map[string]interface{}{} + } + + meta := map[string]interface{}{ + "creation_date": aws.TimeValue(metadata.CreationDate), + "parent_group_name": aws.StringValue(metadata.ParentGroupName), + "root_to_parent_groups": expandIotGroupNameAndArnList(metadata.RootToParentThingGroups), + } + + return []map[string]interface{}{meta} +} + +func expandIotGroupNameAndArnList(lgn 
[]*iot.GroupNameAndArn) []*iot.GroupNameAndArn { + vs := make([]*iot.GroupNameAndArn, 0, len(lgn)) + for _, v := range lgn { + val, ok := interface{}(v).(iot.GroupNameAndArn) + if ok && &val != nil { + vs = append(vs, &iot.GroupNameAndArn{ + GroupName: val.GroupName, + GroupArn: val.GroupArn, + }) + } + } + return vs +} \ No newline at end of file From f1973a0bae9590b5c95f8ca78aa5864a4824c98d Mon Sep 17 00:00:00 2001 From: jhole89 Date: Mon, 24 Feb 2020 17:06:28 -0800 Subject: [PATCH 214/304] add AwsIotThingGroup acceptance tests --- .../resource_aws_iot_thing_group_test.go | 202 ++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 internal/service/lambda/resource_aws_iot_thing_group_test.go diff --git a/internal/service/lambda/resource_aws_iot_thing_group_test.go b/internal/service/lambda/resource_aws_iot_thing_group_test.go new file mode 100644 index 000000000000..f5e4582f4e6f --- /dev/null +++ b/internal/service/lambda/resource_aws_iot_thing_group_test.go @@ -0,0 +1,202 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +func TestAccAWSIotThingGroup_basic(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSIotThingGroupConfig_basic(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "0"), + resource.TestCheckResourceAttr(resourceName, "properties.description", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), + resource.TestCheckResourceAttrSet(resourceName, "version"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSIotThingGroup_full(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSIotThingGroupConfig_full(thingGroupName, "42", "this is my thing group"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", fmt.Sprintf("%s_parent", thingGroupName)), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "3"), + 
resource.TestCheckResourceAttr(resourceName, "properties.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.Answer", "42"), + resource.TestCheckResourceAttr(resourceName, "properties.description", "this is my thing group"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "tagVal"), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), + resource.TestCheckResourceAttrSet(resourceName, "version"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // Update attribute + Config: testAccAWSIotThingGroupConfig_full(thingGroupName, "7", "this is my other thing group"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", fmt.Sprintf("%s_parent", thingGroupName)), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "3"), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.Answer", "7"), + resource.TestCheckResourceAttr(resourceName, "properties.description", "this is my other thing group"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "tagVal"), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), + resource.TestCheckResourceAttrSet(resourceName, "version"), + ), + }, + { // Remove thing group parent association + Config: testAccAWSIotThingConfig_basic(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), + resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "0"), + resource.TestCheckResourceAttr(resourceName, "properties.description", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), + resource.TestCheckResourceAttrSet(resourceName, "version"), + ), + }, + }, + }) +} + +func testAccCheckIotThingGroupExists(n string, thing *iot.DescribeThingGroupOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("no IoT Thing Group ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).iotconn + input := &iot.DescribeThingGroupInput{ + ThingGroupName: aws.String(rs.Primary.ID), + } + resp, err := conn.DescribeThingGroup(input) + if err != nil { + return err + } + + *thing = *resp + + return nil + } +} + +func testAccCheckAWSIotThingGroupDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).iotconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_iot_thing_group" { + continue + } + + input := 
&iot.DescribeThingGroupInput{ + ThingGroupName: aws.String(rs.Primary.ID), + } + + _, err := conn.DescribeThingGroup(input) + if err != nil { + if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + return nil + } + return err + } + return fmt.Errorf("expected IoT Thing Group to be destroyed, %s found", rs.Primary.ID) + + } + + return nil +} + +func testAccAWSIotThingGroupConfig_basic(thingGroupName string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = "%s" +} +`, thingGroupName) +} + +func testAccAWSIotThingGroupConfig_full(thingGroupName, answer, description string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "parent" { + name = "%s_parent" +} + +resource "aws_iot_thing_group" "test" { + name = "%s" + + parent_group_name = "${aws_iot_thing_group.parent.name}" + + properties { + attributes = { + One = "11111" + Two = "TwoTwo" + Answer = "%s" + } + description = "%s" + } + + tags { + "tagKey" = "tagVal" + } +} +`, thingGroupName, thingGroupName, answer, description) +} From 34e318c64a5996aae1824a31de110ca2e89f9f51 Mon Sep 17 00:00:00 2001 From: jhole89 Date: Fri, 28 Feb 2020 18:11:34 -0800 Subject: [PATCH 215/304] refactor AwsIotThingGroup acceptance tests --- .../lambda/resource_aws_iot_thing_group.go | 81 +++- .../resource_aws_iot_thing_group_test.go | 446 ++++++++++++++++-- 2 files changed, 470 insertions(+), 57 deletions(-) diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/lambda/resource_aws_iot_thing_group.go index dfb61dc027a2..85280f019947 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group.go +++ b/internal/service/lambda/resource_aws_iot_thing_group.go @@ -10,10 +10,10 @@ import ( "log" ) -func resourceAwsIotThingGroup() *schema.Resource{ +func resourceAwsIotThingGroup() *schema.Resource { return &schema.Resource{ Create: resourceAwsIotThingGroupCreate, - Read: resourceAwsIotThingGroupRead, + Read: resourceAwsIotThingGroupRead, Update: resourceAwsIotThingGroupUpdate, Delete: resourceAwsIotThingGroupDelete, @@ -37,7 +37,7 @@ func resourceAwsIotThingGroup() *schema.Resource{ Type: schema.TypeList, Optional: true, Computed: true, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "attributes": { @@ -52,9 +52,38 @@ func resourceAwsIotThingGroup() *schema.Resource{ }, }, "tags": tagsSchema(), - "default_client_id": { - Type: schema.TypeString, + "metadata": { + Type: schema.TypeList, Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "creation_date": { + Type: schema.TypeInt, + Computed: true, + }, + "parent_group_name": { + Type: schema.TypeString, + Computed: true, + }, + "root_to_parent_groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_arn": { + Type: schema.TypeString, + Computed: true, + }, + "group_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, }, "version": { Type: schema.TypeInt, @@ -108,19 +137,28 @@ func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) erro } return err } - log.Printf("[DEBUG] Received IoT Thing Group: %s", out) d.Set("arn", out.ThingGroupArn) d.Set("name", out.ThingGroupName) + if err := d.Set("metadata", flattenIotThingGroupMetadata(out.ThingGroupMetadata)); err != nil { - return fmt.Errorf("error setting metadata #{err}") + return fmt.Errorf("error setting metadata: %s", err) } if err := d.Set("properties", 
flattenIotThingGroupProperties(out.ThingGroupProperties)); err != nil { - return fmt.Errorf("error setting properties #{err}") + return fmt.Errorf("error setting properties: %s", err) } d.Set("version", out.Version) + tags, err := keyvaluetags.IotListTags(conn, *out.ThingGroupArn) + if err != nil { + return fmt.Errorf("error listing tags for Iot Thing Group (%s): %s", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } @@ -131,9 +169,17 @@ func resourceAwsIotThingGroupUpdate(d *schema.ResourceData, meta interface{}) er ThingGroupName: aws.String(d.Get("name").(string)), } - if d.HasChange("properties") { - if v, ok := d.GetOk("properties"); ok { - input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) + if v, ok := d.GetOk("properties"); ok { + input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) + } + + if d.HasChange("tags") { + oldTags, newTags := d.GetChange("tags") + + if v, ok := d.GetOk("arn"); ok { + if err := keyvaluetags.IotUpdateTags(conn, v.(string), oldTags, newTags); err != nil { + return fmt.Errorf("error updating Iot Thing Group (%s) tags: %s", d.Id(), err) + } } } @@ -188,10 +234,13 @@ func flattenIotThingGroupProperties(properties *iot.ThingGroupProperties) []map[ } props := map[string]interface{}{ - "attributes": aws.StringValueMap(properties.AttributePayload.Attributes), "description": aws.StringValue(properties.ThingGroupDescription), } + if properties.AttributePayload != nil { + props["attributes"] = aws.StringValueMap(properties.AttributePayload.Attributes) + } + return []map[string]interface{}{props} } @@ -201,8 +250,8 @@ func flattenIotThingGroupMetadata(metadata *iot.ThingGroupMetadata) []map[string } meta := map[string]interface{}{ - "creation_date": aws.TimeValue(metadata.CreationDate), - "parent_group_name": aws.StringValue(metadata.ParentGroupName), + "creation_date": aws.TimeValue(metadata.CreationDate).Unix(), + "parent_group_name": aws.StringValue(metadata.ParentGroupName), "root_to_parent_groups": expandIotGroupNameAndArnList(metadata.RootToParentThingGroups), } @@ -216,9 +265,9 @@ func expandIotGroupNameAndArnList(lgn []*iot.GroupNameAndArn) []*iot.GroupNameAn if ok && &val != nil { vs = append(vs, &iot.GroupNameAndArn{ GroupName: val.GroupName, - GroupArn: val.GroupArn, + GroupArn: val.GroupArn, }) } } return vs -} \ No newline at end of file +} diff --git a/internal/service/lambda/resource_aws_iot_thing_group_test.go b/internal/service/lambda/resource_aws_iot_thing_group_test.go index f5e4582f4e6f..e4c12b7a4284 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group_test.go +++ b/internal/service/lambda/resource_aws_iot_thing_group_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSIotThingGroup_basic(t *testing.T) { +func TestAccAWSIotThingGroup_base(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput rString := acctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) @@ -23,16 +23,15 @@ func TestAccAWSIotThingGroup_basic(t *testing.T) { CheckDestroy: testAccCheckAWSIotThingGroupDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSIotThingGroupConfig_basic(thingGroupName), + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", 
thingGroupName), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "0"), - resource.TestCheckResourceAttr(resourceName, "properties.description", ""), + resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), + resource.TestCheckNoResourceAttr(resourceName, "properties"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, @@ -49,6 +48,7 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput rString := acctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + parentThingGroupName := thingGroupName + "_parent" resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -56,21 +56,16 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckAWSIotThingGroupDestroy, Steps: []resource.TestStep{ - { - Config: testAccAWSIotThingGroupConfig_full(thingGroupName, "42", "this is my thing group"), + { // BASE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", fmt.Sprintf("%s_parent", thingGroupName)), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.Answer", "42"), - resource.TestCheckResourceAttr(resourceName, "properties.description", "this is my thing group"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "tagVal"), + resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, @@ -79,34 +74,34 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - { // Update attribute - Config: testAccAWSIotThingGroupConfig_full(thingGroupName, "7", "this is my other thing group"), + { // UPDATE full + Config: testAccAWSIotThingGroupConfig_full(thingGroupName, parentThingGroupName, "7", "this is my thing group", "myTag"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", fmt.Sprintf("%s_parent", thingGroupName)), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, 
"properties.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.Answer", "7"), - resource.TestCheckResourceAttr(resourceName, "properties.description", "this is my other thing group"), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "7"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my thing group"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "tagVal"), + resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "myTag"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, - { // Remove thing group parent association - Config: testAccAWSIotThingConfig_basic(thingGroupName), + { // DELETE full + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckResourceAttr(resourceName, "properties.attributes.%", "0"), - resource.TestCheckResourceAttr(resourceName, "properties.description", ""), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), + resource.TestCheckNoResourceAttr(resourceName, "properties"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "default_client_id"), resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, @@ -114,6 +109,305 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { }) } +func TestAccAWSIotThingGroup_name(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { // CREATE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // UPDATE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName + "_updated"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "name", thingGroupName+"_updated"), + ), + }, + }, + }) +} + +func TestAccAWSIotThingGroup_tags(t *testing.T) { + var thingGroup 
iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { // BASE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // CREATE Tags + Config: testAccAWSIotThingGroupConfig_withTags(thingGroupName, "myTag"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "myTag"), + ), + }, + { // UPDATE Tags + Config: testAccAWSIotThingGroupConfig_withTags(thingGroupName, "myUpdatedTag"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "myUpdatedTag"), + ), + }, + { // DELETE Tags + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccAWSIotThingGroup_propsAttr(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { // BASE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // CREATE Properties + Config: testAccAWSIotThingGroupConfig_withPropAttr(thingGroupName, "42"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "42"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", ""), + ), + }, + { // UPDATE Properties + Config: testAccAWSIotThingGroupConfig_withPropAttr(thingGroupName, "7"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), + 
resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "7"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", ""), + ), + }, + { // DELETE Properties + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + ), + }, + }, + }) +} + +func TestAccAWSIotThingGroup_propsDesc(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { // BASE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // CREATE Properties + Config: testAccAWSIotThingGroupConfig_withPropDesc(thingGroupName, "this is my thing group"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties.0.attributes"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my thing group"), + ), + }, + { // UPDATE Properties + Config: testAccAWSIotThingGroupConfig_withPropDesc(thingGroupName, "this is my updated thing group"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties.0.attributes"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my updated thing group"), + ), + }, + { // DELETE Properties + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + ), + }, + }, + }) +} + +func TestAccAWSIotThingGroup_propsAll(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { // BASE + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // CREATE Properties + Config: testAccAWSIotThingGroupConfig_withPropAll(thingGroupName, "42", "this is my thing group"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), 
+ resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "42"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my thing group"), + ), + }, + { // UPDATE Properties + Config: testAccAWSIotThingGroupConfig_withPropAll(thingGroupName, "7", "this is my updated thing group"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "7"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my updated thing group"), + ), + }, + { // DELETE Properties + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "properties"), + ), + }, + }, + }) +} + +func TestAccAWSIotThingGroup_parent(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rString := acctest.RandString(8) + thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + parentThingGroupName := thingGroupName + "_parent" + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // CREATE parent_group_name + Config: testAccAWSIotThingGroupConfig_withParent(thingGroupName, parentThingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName), + ), + }, + { // UPDATE parent_group_name + Config: testAccAWSIotThingGroupConfig_withParent(thingGroupName, parentThingGroupName+"_updated"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName+"_updated"), + ), + }, + { // DELETE parent_group_name + Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), + ), + }, + }, + }) +} + func testAccCheckIotThingGroupExists(n string, thing *iot.DescribeThingGroupOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := 
s.RootModule().Resources[n] @@ -160,13 +454,11 @@ func testAccCheckAWSIotThingGroupDestroy(s *terraform.State) error { return err } return fmt.Errorf("expected IoT Thing Group to be destroyed, %s found", rs.Primary.ID) - } - return nil } -func testAccAWSIotThingGroupConfig_basic(thingGroupName string) string { +func testAccAWSIotThingGroupConfig_base(thingGroupName string) string { return fmt.Sprintf(` resource "aws_iot_thing_group" "test" { name = "%s" @@ -174,15 +466,14 @@ resource "aws_iot_thing_group" "test" { `, thingGroupName) } -func testAccAWSIotThingGroupConfig_full(thingGroupName, answer, description string) string { +func testAccAWSIotThingGroupConfig_full(thingGroupName, parentThingGroupName, answer, description, tagValue string) string { return fmt.Sprintf(` resource "aws_iot_thing_group" "parent" { - name = "%s_parent" + name = "%s" } resource "aws_iot_thing_group" "test" { name = "%s" - parent_group_name = "${aws_iot_thing_group.parent.name}" properties { @@ -194,9 +485,82 @@ resource "aws_iot_thing_group" "test" { description = "%s" } - tags { - "tagKey" = "tagVal" + tags = { + tagKey = "%s" + } +} +`, parentThingGroupName, thingGroupName, answer, description, tagValue) +} + +func testAccAWSIotThingGroupConfig_withTags(thingGroupName, tagValue string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = "%s" + + tags = { + tagKey = "%s" } } -`, thingGroupName, thingGroupName, answer, description) +`, thingGroupName, tagValue) +} + +func testAccAWSIotThingGroupConfig_withPropAttr(thingGroupName, answer string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = "%s" + + properties { + attributes = { + One = "11111" + Two = "TwoTwo" + Answer = "%s" + } + } + +} +`, thingGroupName, answer) +} + +func testAccAWSIotThingGroupConfig_withPropDesc(thingGroupName, description string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = "%s" + + properties { + description = "%s" + } + +} +`, thingGroupName, description) +} + +func testAccAWSIotThingGroupConfig_withPropAll(thingGroupName, answer, description string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = "%s" + + properties { + attributes = { + One = "11111" + Two = "TwoTwo" + Answer = "%s" + } + description = "%s" + } + +} +`, thingGroupName, answer, description) +} + +func testAccAWSIotThingGroupConfig_withParent(thingGroupName, parentThingGroupName string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "parent" { + name = "%s" +} + +resource "aws_iot_thing_group" "test" { + name = "%s" + parent_group_name = "${aws_iot_thing_group.parent.name}" +} +`, parentThingGroupName, thingGroupName) } From 4d14548a0dbc7e2d270c01229d6bf7dffebeb156 Mon Sep 17 00:00:00 2001 From: jhole89 Date: Fri, 28 Feb 2020 20:15:54 -0800 Subject: [PATCH 216/304] refactor iot thing group setters --- .../service/lambda/resource_aws_iot_thing_group.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/lambda/resource_aws_iot_thing_group.go index 85280f019947..4543035f932f 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group.go +++ b/internal/service/lambda/resource_aws_iot_thing_group.go @@ -139,16 +139,21 @@ func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) erro } log.Printf("[DEBUG] Received IoT Thing Group: %s", out) - d.Set("arn", out.ThingGroupArn) - d.Set("name", 
out.ThingGroupName)
-
+	if err := d.Set("arn", out.ThingGroupArn); err != nil {
+		return fmt.Errorf("error setting arn: %s", err)
+	}
+	if err := d.Set("name", out.ThingGroupName); err != nil {
+		return fmt.Errorf("error setting name: %s", err)
+	}
 	if err := d.Set("metadata", flattenIotThingGroupMetadata(out.ThingGroupMetadata)); err != nil {
 		return fmt.Errorf("error setting metadata: %s", err)
 	}
 	if err := d.Set("properties", flattenIotThingGroupProperties(out.ThingGroupProperties)); err != nil {
 		return fmt.Errorf("error setting properties: %s", err)
 	}
-	d.Set("version", out.Version)
+	if err := d.Set("arn", out.Version); err != nil {
+		return fmt.Errorf("error setting version: %s", err)
+	}
 
 	tags, err := keyvaluetags.IotListTags(conn, *out.ThingGroupArn)
 	if err != nil {
From f7637b33e9fb3122be4b11fd54a5747e4ad7d44d Mon Sep 17 00:00:00 2001
From: jhole89
Date: Fri, 28 Feb 2020 21:00:22 -0800
Subject: [PATCH 217/304] add aws_iot_thing_group docs

---
 website/docs/r/iot_thing_group.html.markdown | 65 ++++++++++++++++++++
 1 file changed, 65 insertions(+)
 create mode 100644 website/docs/r/iot_thing_group.html.markdown

diff --git a/website/docs/r/iot_thing_group.html.markdown b/website/docs/r/iot_thing_group.html.markdown
new file mode 100644
index 000000000000..f40c6a07aa5d
--- /dev/null
+++ b/website/docs/r/iot_thing_group.html.markdown
@@ -0,0 +1,65 @@
+---
+subcategory: "IoT"
+layout: "aws"
+page_title: "AWS: aws_iot_thing_group"
+description: |-
+  Creates and manages an AWS IoT Thing Group.
+---
+
+# Resource: aws_iot_thing_group
+
+Creates and manages an AWS IoT Thing Group.
+
+## Example Usage
+
+```hcl
+resource "aws_iot_thing_group" "parent" {
+  name = "parent"
+}
+
+resource "aws_iot_thing_group" "example" {
+  name = "example"
+
+  parent_group_name = "${aws_iot_thing_group.parent.name}"
+
+  properties {
+    attributes = {
+      One = "11111"
+      Two = "TwoTwo"
+    }
+    description = "This is my thing group"
+  }
+
+  tags = {
+    terraform = "true"
+  }
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) The name of the Thing Group.
+* `parent_group_name` - (Optional) The name of the parent Thing Group.
+* `properties` - (Optional) The Thing Group properties. Defined below.
+* `tags` - (Optional) Key-value mapping of resource tags.
+
+## properties Reference
+
+* `attributes` - (Optional) Map of attributes of the Thing Group.
+* `description` - (Optional) A description of the Thing Group.
+
+## Attributes Reference
+
+In addition to the arguments above, the following attributes are exported:
+
+* `id` - The Thing Group ID.
+* `version` - The current version of the Thing Group record in the registry.
+* `arn` - The ARN of the Thing Group.
+
+## Import
+
+IoT Thing Groups can be imported using the name, e.g.
+
+```
+$ terraform import aws_iot_thing_group.example example
+```
From f58aa68fbf558a59d8529fb519c6ed35dcfe88d8 Mon Sep 17 00:00:00 2001
From: jhole89
Date: Fri, 28 Feb 2020 21:17:55 -0800
Subject: [PATCH 218/304] Revert "refactor iot thing group setters"

This reverts commit c4ebc4127bd1e2866c172522331ac75cb21e9f06.
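
The reverted commit also introduced a regression in Read: the thing group's
version was written into the "arn" key:

    if err := d.Set("arn", out.Version); err != nil {
        return fmt.Errorf("error setting version: %s", err)
    }

If the error-checked setter style is reintroduced later, a minimal corrected
sketch (assuming the same Read signature) would be:

    if err := d.Set("version", out.Version); err != nil {
        return fmt.Errorf("error setting version: %s", err)
    }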
--- .../service/lambda/resource_aws_iot_thing_group.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/lambda/resource_aws_iot_thing_group.go index 4543035f932f..85280f019947 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group.go +++ b/internal/service/lambda/resource_aws_iot_thing_group.go @@ -139,21 +139,16 @@ func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) erro } log.Printf("[DEBUG] Received IoT Thing Group: %s", out) - if err := d.Set("arn", out.ThingGroupArn); err != nil { - return fmt.Errorf("error setting arn: %s", err) - } - if err := d.Set("name", out.ThingGroupName); err != nil { - return fmt.Errorf("error setting name: %s", err) - } + d.Set("arn", out.ThingGroupArn) + d.Set("name", out.ThingGroupName) + if err := d.Set("metadata", flattenIotThingGroupMetadata(out.ThingGroupMetadata)); err != nil { return fmt.Errorf("error setting metadata: %s", err) } if err := d.Set("properties", flattenIotThingGroupProperties(out.ThingGroupProperties)); err != nil { return fmt.Errorf("error setting properties: %s", err) } - if err := d.Set("arn", out.Version); err != nil { - return fmt.Errorf("error setting version: %s", err) - } + d.Set("version", out.Version) tags, err := keyvaluetags.IotListTags(conn, *out.ThingGroupArn) if err != nil { From 706fb886f36cca8b6bc875c4bad50726e0ec74a2 Mon Sep 17 00:00:00 2001 From: Nils Thenhausen Date: Thu, 17 Dec 2020 17:59:32 +0100 Subject: [PATCH 219/304] fix linter --- .../lambda/resource_aws_iot_thing_group.go | 15 ++++++++------- .../lambda/resource_aws_iot_thing_group_test.go | 14 +++++++------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/lambda/resource_aws_iot_thing_group.go index 85280f019947..b930ce28bd37 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group.go +++ b/internal/service/lambda/resource_aws_iot_thing_group.go @@ -2,12 +2,13 @@ package aws import ( "fmt" + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" - "log" ) func resourceAwsIotThingGroup() *schema.Resource { @@ -37,12 +38,12 @@ func resourceAwsIotThingGroup() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "attributes": { Type: schema.TypeMap, Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "description": { Type: schema.TypeString, @@ -55,7 +56,6 @@ func resourceAwsIotThingGroup() *schema.Resource { "metadata": { Type: schema.TypeList, Computed: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "creation_date": { @@ -123,6 +123,7 @@ func resourceAwsIotThingGroupCreate(d *schema.ResourceData, meta interface{}) er func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).iotconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig input := &iot.DescribeThingGroupInput{ ThingGroupName: aws.String(d.Id()), @@ -155,7 +156,7 @@ func resourceAwsIotThingGroupRead(d 
*schema.ResourceData, meta interface{}) erro return fmt.Errorf("error listing tags for Iot Thing Group (%s): %s", d.Id(), err) } - if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -262,7 +263,7 @@ func expandIotGroupNameAndArnList(lgn []*iot.GroupNameAndArn) []*iot.GroupNameAn vs := make([]*iot.GroupNameAndArn, 0, len(lgn)) for _, v := range lgn { val, ok := interface{}(v).(iot.GroupNameAndArn) - if ok && &val != nil { + if ok { vs = append(vs, &iot.GroupNameAndArn{ GroupName: val.GroupName, GroupArn: val.GroupArn, diff --git a/internal/service/lambda/resource_aws_iot_thing_group_test.go b/internal/service/lambda/resource_aws_iot_thing_group_test.go index e4c12b7a4284..6eb0ace55746 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group_test.go +++ b/internal/service/lambda/resource_aws_iot_thing_group_test.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSIotThingGroup_base(t *testing.T) { @@ -473,8 +473,8 @@ resource "aws_iot_thing_group" "parent" { } resource "aws_iot_thing_group" "test" { - name = "%s" - parent_group_name = "${aws_iot_thing_group.parent.name}" + name = "%s" + parent_group_name = aws_iot_thing_group.parent.name properties { attributes = { @@ -559,8 +559,8 @@ resource "aws_iot_thing_group" "parent" { } resource "aws_iot_thing_group" "test" { - name = "%s" - parent_group_name = "${aws_iot_thing_group.parent.name}" + name = "%s" + parent_group_name = aws_iot_thing_group.parent.name } `, parentThingGroupName, thingGroupName) } From 19b44b2aa1f2d4371a9c04850612650638127e20 Mon Sep 17 00:00:00 2001 From: Nils Thenhausen Date: Thu, 17 Dec 2020 18:07:11 +0100 Subject: [PATCH 220/304] lint fix --- website/docs/r/iot_thing_group.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/iot_thing_group.html.markdown b/website/docs/r/iot_thing_group.html.markdown index f40c6a07aa5d..0423bc30f762 100644 --- a/website/docs/r/iot_thing_group.html.markdown +++ b/website/docs/r/iot_thing_group.html.markdown @@ -20,12 +20,12 @@ resource "aws_iot_thing_group" "parent" { resource "aws_iot_thing_group" "example" { name = "example" - parent_group_name = "${aws_iot_thing_group.parent.name}" + parent_group_name = aws_iot_thing_group.parent.name properties { attributes = { - One = "11111" - Two = "TwoTwo" + One = "11111" + Two = "TwoTwo" } description = "This is my thing group" } From bd7611b7a6d2bf668318182c7956efa49d3d49ae Mon Sep 17 00:00:00 2001 From: Nils Thenhausen Date: Mon, 21 Dec 2020 09:20:59 +0100 Subject: [PATCH 221/304] fix lint --- internal/service/lambda/resource_aws_iot_thing_group.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/lambda/resource_aws_iot_thing_group.go index b930ce28bd37..96e38ac47894 100644 --- a/internal/service/lambda/resource_aws_iot_thing_group.go +++ b/internal/service/lambda/resource_aws_iot_thing_group.go @@ -117,7 +117,7 @@ 
func resourceAwsIotThingGroupCreate(d *schema.ResourceData, meta interface{}) er return err } - d.SetId(*out.ThingGroupName) + d.SetId(aws.StringValue(out.ThingGroupName)) return resourceAwsIotThingGroupRead(d, meta) } From 254100689d47c932be19e9f9cc6e38827526da51 Mon Sep 17 00:00:00 2001 From: Oleksiy Veretiuk Date: Mon, 21 Oct 2019 10:49:01 +0300 Subject: [PATCH 222/304] Add resource to add iot thing in thing group --- ...resource_aws_iot_thing_group_attachment.go | 164 ++++++++++++++++++ ...rce_aws_iot_thing_group_attachment_test.go | 131 ++++++++++++++ .../iot_thing_group_attachment.html.markdown | 34 ++++ 3 files changed, 329 insertions(+) create mode 100644 internal/service/lambda/resource_aws_iot_thing_group_attachment.go create mode 100644 internal/service/lambda/resource_aws_iot_thing_group_attachment_test.go create mode 100644 website/docs/r/iot_thing_group_attachment.html.markdown diff --git a/internal/service/lambda/resource_aws_iot_thing_group_attachment.go b/internal/service/lambda/resource_aws_iot_thing_group_attachment.go new file mode 100644 index 000000000000..b9245696d187 --- /dev/null +++ b/internal/service/lambda/resource_aws_iot_thing_group_attachment.go @@ -0,0 +1,164 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsIotThingGroupAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIotThingGroupAttachmentCreate, + Read: resourceAwsIotThingGroupAttachmentRead, + Delete: resourceAwsIotThingGroupAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsIotThingGroupAttachmentImport, + }, + + Schema: map[string]*schema.Schema{ + "thing_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "thing_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "override_dynamics_group": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIotThingGroupAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iotconn + + params := &iot.AddThingToThingGroupInput{} + params.ThingName = aws.String(d.Get("thing_name").(string)) + params.ThingGroupName = aws.String(d.Get("thing_group_name").(string)) + + if v, ok := d.GetOk("override_dynamics_group"); ok { + params.OverrideDynamicGroups = aws.Bool(v.(bool)) + } + + _, err := conn.AddThingToThingGroup(params) + + if err != nil { + return err + } + + d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-%s", *params.ThingName, *params.ThingGroupName))) + + return resourceAwsIotThingGroupAttachmentRead(d, meta) +} + +func resourceAwsIotThingGroupAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iotconn + + thingName := d.Get("thing_name").(string) + thingGroupName := d.Get("thing_group_name").(string) + + hasThingGroup, err := iotThingHasThingGroup(conn, thingName, thingGroupName, "") + + if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] IoT Thing (%s) is not found", thingName) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error finding IoT Thing Group (%s) of thing (%s): %s", thingGroupName, thingName, err) + } + + if !hasThingGroup { + log.Printf("[WARN] IoT Thing Group (%s) is not found in Thing (%s) group list", thingGroupName, thingName) 
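+		// Clearing the ID tells Terraform the attachment no longer exists,
+		// so it is removed from state instead of failing the read.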
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("thing_name", thingName)
+	d.Set("thing_group_name", thingGroupName)
+	if v, ok := d.GetOk("override_dynamics_group"); ok {
+		d.Set("override_dynamics_group", v.(bool))
+	}
+
+	return nil
+}
+
+func resourceAwsIotThingGroupAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).iotconn
+
+	params := &iot.RemoveThingFromThingGroupInput{}
+	params.ThingName = aws.String(d.Get("thing_name").(string))
+	params.ThingGroupName = aws.String(d.Get("thing_group_name").(string))
+
+	_, err := conn.RemoveThingFromThingGroup(params)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceAwsIotThingGroupAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	idParts := strings.SplitN(d.Id(), "/", 2)
+	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
+		return nil, fmt.Errorf("unexpected format of ID (%q), expected <thing_name>/<thing_group_name>", d.Id())
+	}
+
+	thingName := idParts[0]
+	thingGroupName := idParts[1]
+
+	d.Set("thing_name", thingName)
+	d.Set("thing_group_name", thingGroupName)
+
+	d.SetId(fmt.Sprintf("%s-%s", thingName, thingGroupName))
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func iotThingHasThingGroup(conn *iot.IoT, thingName string, thingGroupName string, nextToken string) (bool, error) {
+	maxResults := int64(20)
+
+	params := &iot.ListThingGroupsForThingInput{
+		MaxResults: aws.Int64(maxResults),
+		ThingName:  aws.String(thingName),
+	}
+
+	if len(nextToken) > 0 {
+		params.NextToken = aws.String(nextToken)
+	}
+
+	out, err := conn.ListThingGroupsForThing(params)
+	if err != nil {
+		return false, err
+	}
+
+	// Check whether the searched-for group appears in the current page of
+	// results; if it does, return true.
+	for _, group := range out.ThingGroups {
+		if thingGroupName == *group.GroupName {
+			return true, nil
+		}
+	}
+
+	// If the group does not appear in the current page, check whether
+	// NextToken is set. If so, call iotThingHasThingGroup recursively to
+	// search the next page of results. Otherwise return false.
+	if out.NextToken != nil {
+		return iotThingHasThingGroup(conn, thingName, thingGroupName, *out.NextToken)
+	} else {
+		return false, nil
+	}
+}
diff --git a/internal/service/lambda/resource_aws_iot_thing_group_attachment_test.go b/internal/service/lambda/resource_aws_iot_thing_group_attachment_test.go
new file mode 100644
index 000000000000..69fa53b795ae
--- /dev/null
+++ b/internal/service/lambda/resource_aws_iot_thing_group_attachment_test.go
@@ -0,0 +1,131 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/iot"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+func TestAccAWSIotThingGroupAttachment_basic(t *testing.T) {
+	rString := acctest.RandString(8)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSIotThingGroupAttachmentDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSIotThingGroupAttachmentConfig_basic(rString),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("aws_iot_thing_group_attachment.test_attachment", "thing_name", fmt.Sprintf("test_thing_%s", rString)),
+					resource.TestCheckResourceAttr("aws_iot_thing_group_attachment.test_attachment", "thing_group_name", fmt.Sprintf("test_group_%s", rString)),
+					resource.TestCheckResourceAttr("aws_iot_thing_group_attachment.test_attachment", "override_dynamics_group", "false"),
+					testAccAWSIotThingGroupAttachmentExists_basic(rString),
+				),
+			},
+			{
+				ResourceName:      "aws_iot_thing_group_attachment.test_attachment",
+				ImportStateIdFunc: testAccAWSIotThingGroupAttachmentImportStateIdFunc("aws_iot_thing_group_attachment.test_attachment"),
+				ImportState:       true,
+				// We do not have a way to align IDs since the Create function uses resource.PrefixedUniqueId()
+				// Failed state verification, resource with ID <thing_name>-<thing_group_name> not found
+				// ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func testAccAWSIotThingGroupAttachmentExists_basic(rString string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+
+		conn := testAccProvider.Meta().(*AWSClient).iotconn
+
+		for _, rs := range s.RootModule().Resources {
+			if rs.Type != "aws_iot_thing_group_attachment" {
+				continue
+			}
+
+			thingName := rs.Primary.Attributes["thing_name"]
+			thingGroupName := rs.Primary.Attributes["thing_group_name"]
+			hasThingGroup, err := iotThingHasThingGroup(conn, thingName, thingGroupName, "")
+
+			if err != nil {
+				return err
+			}
+
+			if !hasThingGroup {
+				return fmt.Errorf("IoT Thing (%s) is not in IoT Thing Group (%s)", thingName, thingGroupName)
+			}
+
+			return nil
+		}
+		return nil
+	}
+}
+
+func testAccCheckAWSIotThingGroupAttachmentDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).iotconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_iot_thing_group_attachment" {
+			continue
+		}
+
+		thingName := rs.Primary.Attributes["thing_name"]
+		thingGroupName := rs.Primary.Attributes["thing_group_name"]
+
+		hasThingGroup, err := iotThingHasThingGroup(conn, thingName, thingGroupName, "")
+
+		if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") {
+			return nil
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if hasThingGroup {
+			return fmt.Errorf("IoT Thing (%s) still in IoT Thing Group (%s)", thingName, thingGroupName)
+		}
+	}
+	return nil
+}
+
+func testAccAWSIotThingGroupAttachmentImportStateIdFunc(resourceName string) resource.ImportStateIdFunc {
+	return func(s *terraform.State) (string, error) {
+		rs, ok := s.RootModule().Resources[resourceName]
+		if !ok {
+			return "", fmt.Errorf("Not found: %s", resourceName)
+		}
+
+		return fmt.Sprintf("%s/%s", rs.Primary.Attributes["thing_name"], rs.Primary.Attributes["thing_group_name"]), nil
+	}
+}
+
+func testAccAWSIotThingGroupAttachmentConfig_basic(rString string) string {
+	return fmt.Sprintf(`
+resource "aws_iot_thing" "test_thing" {
+  name = "test_thing_%s"
+}
+
+resource "aws_iot_thing_group" "test_thing_group" {
+  name = "test_group_%[1]s"
+  properties {
+    attributes = {
+      "attr1": "val1",
+    }
+    merge = false
+  }
+}
+
+resource "aws_iot_thing_group_attachment" "test_attachment" {
+  thing_name = "${aws_iot_thing.test_thing.name}"
+  thing_group_name = "${aws_iot_thing_group.test_thing_group.name}"
+  override_dynamics_group = false
+}
+`, rString)
+}
diff --git a/website/docs/r/iot_thing_group_attachment.html.markdown b/website/docs/r/iot_thing_group_attachment.html.markdown
new file mode 100644
index 000000000000..0d2bcafabf04
--- /dev/null
+++ b/website/docs/r/iot_thing_group_attachment.html.markdown
@@ -0,0 +1,34 @@
+---
+layout: "aws"
+page_title: "AWS: aws_iot_thing_group_attachment"
+description: |-
+  Allows adding an IoT Thing to an IoT Thing Group.
+---
+
+# Resource: aws_iot_thing_group_attachment
+
+Allows adding an IoT Thing to an IoT Thing Group.
+
+## Example Usage
+
+```hcl
+resource "aws_iot_thing_group_attachment_attachment" "test_attachment" {
+  thing_name = "test_thing_name"
+  thing_group_name = "test_thing_group_name"
+  override_dynamics_group = false
+}
+```
+
+## Argument Reference
+
+* `thing_name` - (Required, Forces new resource) The name of the thing to add to a group.
+* `thing_group_name` - (Required, Forces new resource) The name of the group to which you are adding a thing.
+* `override_dynamics_group` - (Optional) Bool. Override dynamic thing groups with static thing groups when the 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group.
+
+## Import
+
+IoT Thing Group Attachments can be imported using the thing name and thing group name, e.g.
+
+```
+$ terraform import aws_iot_thing_group_attachment.test_attachment thing_name/thing_group
+```
From 0e11689294efe4bd2d50baad2a94af2d231fc7e1 Mon Sep 17 00:00:00 2001
From: Oleksiy Veretiuk
Date: Fri, 25 Oct 2019 12:58:40 +0300
Subject: [PATCH 223/304] typo in docs

---
 website/docs/r/iot_thing_group_attachment.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/iot_thing_group_attachment.html.markdown b/website/docs/r/iot_thing_group_attachment.html.markdown
index 0d2bcafabf04..437d59e8e61c 100644
--- a/website/docs/r/iot_thing_group_attachment.html.markdown
+++ b/website/docs/r/iot_thing_group_attachment.html.markdown
@@ -12,7 +12,7 @@ Allows adding an IoT Thing to an IoT Thing Group.
 ## Example Usage
 
 ```hcl
-resource "aws_iot_thing_group_attachment_attachment" "test_attachment" {
+resource "aws_iot_thing_group_attachment" "test_attachment" {
   thing_name = "test_thing_name"
   thing_group_name = "test_thing_group_name"
   override_dynamics_group = false
From 41241e6f1cabc04878e02edbee5b1ee8fc8af842 Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Tue, 16 Nov 2021 19:14:33 +0000
Subject: [PATCH 224/304] Update CHANGELOG.md for #21777

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b2d4663df01a..c08aa6d0aaeb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@ ENHANCEMENTS:
 BUG FIXES:
 
 * aws_s3_access_point: `vpc_configuration.vpc_id` is _ForceNew_ ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294))
+* resource/aws_autoscaling_group: Fix pending state in instance refresh ([#21777](https://github.com/hashicorp/terraform-provider-aws/issues/21777))
 * resource/aws_cloudfront_cache_policy: Fix 0 values for `default_ttl`, `max_ttl` and `min_ttl` arguments ([#21793](https://github.com/hashicorp/terraform-provider-aws/issues/21793))
 * resource/aws_internet_gateway: Allow `available` as a *pending* state during gateway detach ([#21794](https://github.com/hashicorp/terraform-provider-aws/issues/21794))
 * resource/aws_security_group: Fix lack of pagination when describing security groups ([#21743](https://github.com/hashicorp/terraform-provider-aws/issues/21743))
From b799007ed941081dc0bf0e6c6e1f670fd5b4a1e7 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Wed, 10 Nov 2021 17:10:38 -0800
Subject: [PATCH 225/304] Fixes Semgrep filepaths for passing checks

---
 .semgrep.yml                 | 74 +++++++++++++++++++-----------------
 internal/tfresource/retry.go |  2 +-
 2 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/.semgrep.yml b/.semgrep.yml
index b5ca7b36efbc..7b3ad94cd954 100644
--- a/.semgrep.yml
+++ b/.semgrep.yml
@@ -121,7 +121,7 @@ rules:
     message: Prefer AWS Go SDK pointer conversion aws.StringValue() function for dereferencing during d.SetId()
     paths:
       include:
-        - aws/
+        - internal/
     pattern: 'd.SetId(*$VALUE)'
     severity: WARNING
 
@@ -131,7 +131,7 @@ rules:
     message: Using AWS Go SDK pointer conversion, e.g. aws.String(), with immediate dereferencing is extraneous
     paths:
      include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
          - pattern: '*aws.Bool($VALUE)'
@@ -164,7 +164,7 @@ rules:
     message: Using `acctest.RandInt()` in constant or variable declaration will execute during compilation and not randomize, pass into string generating function instead
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
          - pattern: const $CONST = fmt.Sprintf(..., <... acctest.RandInt() ...>, ...)
@@ -176,7 +176,7 @@ rules:
     message: Using `acctest.RandString()` in constant or variable declaration will execute during compilation and not randomize, pass into string generating function instead
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
          - pattern: const $CONST = fmt.Sprintf(..., <... acctest.RandString(...) ...>, ...)
@@ -188,7 +188,7 @@ rules:
     message: Using `acctest.RandomWithPrefix()` in constant or variable declaration will execute during compilation and not randomize, pass into string generating function instead
     paths:
      include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
          - pattern: const $CONST = fmt.Sprintf(..., <... acctest.RandomWithPrefix(...) ...>, ...)
@@ -200,7 +200,7 @@ rules: message: Prefer `flattenStringSet()` function for casting a list of string pointers to a set paths: include: - - aws/ + - internal/ pattern: schema.NewSet(schema.HashString, flattenStringList($APIOBJECT)) severity: WARNING @@ -209,7 +209,7 @@ rules: message: Prefer `expandStringSet()` function for casting a set to a list of string pointers paths: include: - - aws/ + - internal/ patterns: - pattern-either: - pattern: expandStringList($SET.List()) @@ -224,7 +224,7 @@ rules: message: Zero value conditional check after `d.GetOk()` is extraneous paths: include: - - aws/ + - internal/ patterns: - pattern-either: - pattern: if $VALUE, $OK := d.GetOk($KEY); $OK && $VALUE.(bool) { $BODY } @@ -240,7 +240,7 @@ rules: message: AWS Go SDK pointer conversion function for `d.Set()` value is extraneous paths: include: - - aws/ + - internal/ patterns: - pattern-either: - pattern: d.Set($ATTRIBUTE, aws.BoolValue($APIOBJECT)) @@ -251,28 +251,29 @@ rules: - pattern: d.Set($ATTRIBUTE, aws.StringValue($APIOBJECT)) severity: WARNING - - id: helper-schema-ResourceData-DataSource-Set-tags - languages: [go] - message: (schema.ResourceData).Set() call with the tags key should include IgnoreConfig in the value - paths: - include: - - aws/data_source*.go - exclude: - - aws/resource*.go - patterns: - - pattern-inside: func $READMETHOD(...) $ERRORTYPE { ... } - - pattern: if err := d.Set("tags", $TAGSMAP); err != nil { ... } - - pattern-not: if err := d.Set("tags", $KEYVALUETAGS.IgnoreAws().IgnoreConfig($CONFIG).Map()); err != nil { ... } - severity: WARNING + # Not sure why this isn't working + # - id: helper-schema-ResourceData-DataSource-Set-tags + # languages: [go] + # message: (schema.ResourceData).Set() call with the tags key should include IgnoreConfig in the value + # paths: + # include: + # - internal/service/**/*_data_source.go + # exclude: + # - internal/service/**/*.go + # patterns: + # - pattern-inside: func $READMETHOD(...) $ERRORTYPE { ... } + # - pattern: if err := d.Set("tags", $TAGSMAP); err != nil { ... } + # - pattern-not: if err := d.Set("tags", $KEYVALUETAGS.IgnoreAws().IgnoreConfig($CONFIG).Map()); err != nil { ... } + # severity: WARNING - id: helper-schema-ResourceData-Resource-Set-tags languages: [go] message: (schema.ResourceData).Set() call with the tags key should be preceded by a call to IgnoreConfig or include IgnoreConfig in the value in the case of ASG paths: include: - - aws/resource*.go + - internal/service/**/*.go exclude: - - aws/data_source*.go + - internal/service/**/*_data_source.go patterns: - pattern-inside: func $READMETHOD(...) $ERRORTYPE { ... } - pattern-either: @@ -301,6 +302,9 @@ rules: - pattern-not: | tags = keyvaluetags.$VALUETAGS($RESOURCETAGS).IgnoreAws().IgnoreConfig($CONFIG) ... + - pattern-not: | + tags = $VALUETAGS($RESOURCETAGS).IgnoreAWS().IgnoreConfig($CONFIG).Ignore($IGNORE) + ... 
     severity: WARNING
 
   - id: helper-schema-ResourceData-SetId-empty-without-IsNewResource-check
@@ -352,9 +356,9 @@ rules:
     paths:
       exclude:
         - "*_test.go"
-        - aws/internal/tfresource/*.go
+        - sweep.go
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
           - patterns:
@@ -434,7 +438,7 @@ rules:
       exclude:
         - "*_test.go"
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
           - patterns:
@@ -486,7 +490,7 @@ rules:
     message: Check for resource.NotFoundError errors with tfresource.NotFound()
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
           - patterns:
@@ -510,7 +514,7 @@ rules:
     message: Use time.Equal() instead of ==
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-either:
           - pattern: |
@@ -536,7 +540,7 @@ rules:
     message: Use lastPage for bool variable in pagination functions
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern: |
          $X.$Z(..., func(..., $Y bool) {
@@ -559,10 +563,10 @@ rules:
     languages: [go]
    message: Do not call `fmt.Print` and variant
     paths:
+      include:
+        - internal/
       exclude:
         - providerlint/vendor/
-      include:
-        - aws/
     patterns:
       - pattern-either:
           - pattern: |
@@ -595,7 +599,7 @@ rules:
     message: Use default email address or generate a random email address. https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md#hardcoded-email-addresses
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern-regex: '[-_A-Za-z0-9.+]+@([-A-Za-z0-9]+\.)(com|net|org)'
       - pattern-not-regex: 'no-reply@hashicorp\.com'
@@ -607,7 +611,7 @@ rules:
     message: Generate random SSH keys using acctest.RandSSHKeyPair() or RandSSHKeyPairSize(). https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md#hardcoded-ssh-key
     paths:
       include:
-        - aws/
+        - internal/
       exclude:
         - providerlint/vendor/
     patterns:
@@ -622,7 +626,7 @@ rules:
     message: Incorrect form of non-tags change detection. https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/contribution-checklists.md#resource-tagging-code-implementation
     paths:
       include:
-        - aws/
+        - internal/
     patterns:
       - pattern: 'if d.HasChangeExcept("tags_all") {...}'
     severity: WARNING
diff --git a/internal/tfresource/retry.go b/internal/tfresource/retry.go
index 1fc7e9990e6d..5b3e8f2a4697 100644
--- a/internal/tfresource/retry.go
+++ b/internal/tfresource/retry.go
@@ -21,7 +21,7 @@ type Retryable func(error) (bool, error)
 func RetryWhenContext(ctx context.Context, timeout time.Duration, f func() (interface{}, error), retryable Retryable) (interface{}, error) {
 	var output interface{}
 
-	err := resource.Retry(timeout, func() *resource.RetryError {
+	err := resource.Retry(timeout, func() *resource.RetryError { // nosemgrep: helper-schema-resource-Retry-without-TimeoutError-check
 		var err error
 		output, err = f()
 
From b7e7c00d521d8f2c52b7898715ea622285e93cde Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 14:19:59 -0500
Subject: [PATCH 226/304] Rename new resource and acceptance test files.
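
The renames line the new IoT thing group files up with the `internal/service/iot`
package layout and the `aws_iot_thing_group_membership` naming used by the renamed
documentation page. A minimal usage sketch under the new name, assuming the
arguments carry over unchanged from the attachment resource documented earlier:

```hcl
# Hypothetical example: the resource type and argument names are assumptions
# based on the file renames in this commit, not a documented interface.
resource "aws_iot_thing_group_membership" "example" {
  thing_name       = "example_thing"
  thing_group_name = "example_group"
}
```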
--- .../resource_aws_iot_thing_group.go => iot/thing_group.go} | 0 .../thing_group_membership.go} | 0 .../thing_group_membership_test.go} | 0 .../thing_group_test.go} | 0 ...ent.html.markdown => iot_thing_group_membership.html.markdown} | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename internal/service/{lambda/resource_aws_iot_thing_group.go => iot/thing_group.go} (100%) rename internal/service/{lambda/resource_aws_iot_thing_group_attachment.go => iot/thing_group_membership.go} (100%) rename internal/service/{lambda/resource_aws_iot_thing_group_attachment_test.go => iot/thing_group_membership_test.go} (100%) rename internal/service/{lambda/resource_aws_iot_thing_group_test.go => iot/thing_group_test.go} (100%) rename website/docs/r/{iot_thing_group_attachment.html.markdown => iot_thing_group_membership.html.markdown} (100%) diff --git a/internal/service/lambda/resource_aws_iot_thing_group.go b/internal/service/iot/thing_group.go similarity index 100% rename from internal/service/lambda/resource_aws_iot_thing_group.go rename to internal/service/iot/thing_group.go diff --git a/internal/service/lambda/resource_aws_iot_thing_group_attachment.go b/internal/service/iot/thing_group_membership.go similarity index 100% rename from internal/service/lambda/resource_aws_iot_thing_group_attachment.go rename to internal/service/iot/thing_group_membership.go diff --git a/internal/service/lambda/resource_aws_iot_thing_group_attachment_test.go b/internal/service/iot/thing_group_membership_test.go similarity index 100% rename from internal/service/lambda/resource_aws_iot_thing_group_attachment_test.go rename to internal/service/iot/thing_group_membership_test.go diff --git a/internal/service/lambda/resource_aws_iot_thing_group_test.go b/internal/service/iot/thing_group_test.go similarity index 100% rename from internal/service/lambda/resource_aws_iot_thing_group_test.go rename to internal/service/iot/thing_group_test.go diff --git a/website/docs/r/iot_thing_group_attachment.html.markdown b/website/docs/r/iot_thing_group_membership.html.markdown similarity index 100% rename from website/docs/r/iot_thing_group_attachment.html.markdown rename to website/docs/r/iot_thing_group_membership.html.markdown From 5ec9625d07f816ff911aea9994e3eb845d0a713e Mon Sep 17 00:00:00 2001 From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Tue, 16 Nov 2021 13:32:39 -0600 Subject: [PATCH 227/304] autoscaling_group: Link to API docs for metrics values (#21779) * autoscaling_group: Link to API docs for metrics values * Remove trailing whitespace --- website/docs/r/autoscaling_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index 818027641e38..c53762683305 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -374,7 +374,7 @@ Note that if you suspend either the `Launch` or `Terminate` process types, it ca * `tags` (Optional) Set of maps containing resource tags. Conflicts with `tag`. See [Tags](#tag-and-tags) below for more details. * `placement_group` (Optional) The name of the placement group into which you'll launch your instances, if any. * `metrics_granularity` - (Optional) The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`. -* `enabled_metrics` - (Optional) A list of metrics to collect. 
The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`. +* `enabled_metrics` - (Optional) A list of metrics to collect. The allowed values are defined by the [underlying AWS API](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html). * `wait_for_capacity_timeout` (Default: "10m") A maximum [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should wait for ASG instances to be healthy before timing out. (See also [Waiting From 857b07e89429927cf88c96c5973733e5dc6dc815 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Tue, 16 Nov 2021 11:32:57 -0800 Subject: [PATCH 228/304] action to add roadmap milestone items to working board (#21798) --- .github/workflows/roadmap_milestone.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/roadmap_milestone.yml diff --git a/.github/workflows/roadmap_milestone.yml b/.github/workflows/roadmap_milestone.yml new file mode 100644 index 000000000000..41d53c5e6739 --- /dev/null +++ b/.github/workflows/roadmap_milestone.yml @@ -0,0 +1,15 @@ +name: If roadmap milestone is assigned, add to working board. +on: + issues: + types: [milestoned] +jobs: + AddRoadmapItemsToBoard: + runs-on: ubuntu-latest + steps: + - name: Move Roadmap Items To Working Board + uses: alex-page/github-project-automation-plus@v0.8.1 + if: github.event.issue.milestone.title == 'Roadmap' + with: + project: AWS Provider Working Board + column: To Do + repo-token: ${{ secrets.ORGSCOPED_GITHUB_TOKEN}} From 9708ff960fe8958f4045d3684a6bf61924bb3fd2 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 15 Nov 2021 16:40:57 -0800 Subject: [PATCH 229/304] Restores `data-source-with-resource-read` --- .semgrep.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.semgrep.yml b/.semgrep.yml index 7b3ad94cd954..5b06b93339f5 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -146,7 +146,7 @@ rules: message: Calling a resource's Read method from within a data-source is discouraged paths: include: - - aws/data_source_aws_*.go + - internal/service/**/*_data_source.go patterns: - pattern-regex: '(resource.+Read|flatten.+Resource)' - pattern-inside: func $FUNCNAME(...) $RETURNTYPE { ... 
} From 0a097755426e8c448238ba3c2abd174c2d0a1938 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 16 Nov 2021 10:30:37 -0800 Subject: [PATCH 230/304] Restores `prefer-aws-go-sdk-pointer-conversion-assignment` rule --- .semgrep.yml | 37 +- internal/service/apigateway/flex.go | 4 +- .../autoscaling/launch_configuration.go | 30 +- internal/service/autoscaling/sweep.go | 2 +- internal/service/cloudformation/flex.go | 8 +- .../distribution_configuration_structure.go | 18 +- internal/service/cognitoidentity/flex.go | 34 +- internal/service/cognitoidentity/pool.go | 10 +- .../cognitoidentity/pool_roles_attachment.go | 2 +- internal/service/configservice/flex.go | 22 +- internal/service/dax/cluster.go | 2 +- internal/service/dynamodb/global_table.go | 2 +- internal/service/ecr/repository.go | 2 +- internal/service/ecs/flex.go | 4 +- internal/service/ecs/task_definition.go | 2 +- internal/service/ecs/task_definition_test.go | 2 +- .../service/elasticache/replication_group.go | 2 +- internal/service/elasticache/subnet_group.go | 2 +- internal/service/glue/resource_policy_test.go | 3 +- internal/service/kms/key_test.go | 2 +- internal/service/mq/broker.go | 6 +- internal/service/organizations/account.go | 2 +- .../serverlessrepo/cloudformation_stack.go | 2 +- .../ses/identity_notification_topic_test.go | 6 +- .../signer/signing_profile_permission.go | 2 +- internal/service/sns/topic_test.go | 6 +- internal/service/swf/domain.go | 2 +- internal/service/waf/byte_match_set.go | 4 +- internal/service/waf/flex.go | 20 +- internal/service/waf/geo_match_set.go | 4 +- internal/service/waf/helpers.go | 64 ++-- internal/service/waf/rate_based_rule.go | 2 +- internal/service/waf/regex_match_set.go | 4 +- internal/service/waf/regex_pattern_set.go | 2 +- internal/service/waf/rule.go | 2 +- internal/service/waf/rule_group.go | 4 +- internal/service/waf/size_constraint_set.go | 6 +- .../service/waf/sql_injection_match_set.go | 4 +- internal/service/waf/web_acl.go | 18 +- internal/service/waf/xss_match_set.go | 4 +- .../service/wafregional/byte_match_set.go | 5 +- internal/service/wafregional/flex.go | 117 ------ internal/service/wafregional/geo_match_set.go | 5 +- .../service/wafregional/rate_based_rule.go | 3 +- .../service/wafregional/regex_match_set.go | 9 +- .../wafregional/regex_match_set_test.go | 5 +- .../service/wafregional/regex_pattern_set.go | 3 +- internal/service/wafregional/rule.go | 3 +- internal/service/wafregional/rule_group.go | 5 +- .../wafregional/size_constraint_set.go | 7 +- .../wafregional/sql_injection_match_set.go | 7 +- internal/service/wafregional/waf_helpers.go | 357 ------------------ internal/service/wafregional/web_acl.go | 21 +- internal/service/wafregional/xss_match_set.go | 7 +- 54 files changed, 212 insertions(+), 696 deletions(-) delete mode 100644 internal/service/wafregional/flex.go delete mode 100644 internal/service/wafregional/waf_helpers.go diff --git a/.semgrep.yml b/.semgrep.yml index 5b06b93339f5..e4f58f08fd81 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -43,27 +43,24 @@ rules: languages: [go] message: Prefer AWS Go SDK pointer conversion functions for dereferencing during assignment, e.g. 
aws.StringValue() paths: - exclude: - - aws/cloudfront_distribution_configuration_structure.go - - aws/data_source_aws_route_table.go - - aws/opsworks_layers.go - - aws/resource_aws_d* - - aws/resource_aws_e* - - aws/resource_aws_g* - - aws/resource_aws_i* - - aws/resource_aws_k* - - aws/resource_aws_l* - - aws/resource_aws_mq_broker.go - - aws/resource_aws_o* - - aws/resource_aws_r* - - aws/resource_aws_s* - - aws/structure.go - - aws/waf_helpers.go - - aws/internal/generators/ - - aws/internal/keyvaluetags/ - - providerlint/vendor/ include: - - aws/ + - internal/service + exclude: + - internal/service/ec2 + - internal/service/elasticbeanstalk + - internal/service/elasticsearch + - internal/service/elb + - internal/service/emr + - internal/service/gamelift + - internal/service/iam + - internal/service/lambda + - internal/service/opsworks + - internal/service/rds + - internal/service/redshift + - internal/service/route53 + - internal/service/s3 + - internal/service/servicediscovery + - internal/service/ssm patterns: - pattern: '$LHS = *$RHS' - pattern-not: '*$LHS2 = *$RHS' diff --git a/internal/service/apigateway/flex.go b/internal/service/apigateway/flex.go index 2d2794936ab9..14c0a215f701 100644 --- a/internal/service/apigateway/flex.go +++ b/internal/service/apigateway/flex.go @@ -112,11 +112,11 @@ func FlattenThrottleSettings(settings *apigateway.ThrottleSettings) []map[string if settings != nil { r := make(map[string]interface{}) if settings.BurstLimit != nil { - r["burst_limit"] = *settings.BurstLimit + r["burst_limit"] = aws.Int64Value(settings.BurstLimit) } if settings.RateLimit != nil { - r["rate_limit"] = *settings.RateLimit + r["rate_limit"] = aws.Float64Value(settings.RateLimit) } result = append(result, r) diff --git a/internal/service/autoscaling/launch_configuration.go b/internal/service/autoscaling/launch_configuration.go index 2d5bc4becfd8..08440500bff5 100644 --- a/internal/service/autoscaling/launch_configuration.go +++ b/internal/service/autoscaling/launch_configuration.go @@ -744,15 +744,11 @@ func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autosca if len(lc.BlockDeviceMappings) == 0 { return nil, nil } - rootDeviceName, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn) + v, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn) if err != nil { return nil, err } - if rootDeviceName == nil { - // We do this so the value is empty so we don't have to do nil checks later - var blank string - rootDeviceName = &blank - } + rootDeviceName := aws.StringValue(v) // Collect existing configured devices, so we can check // existing value of delete_on_termination below @@ -777,41 +773,41 @@ func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autosca } bd["delete_on_termination"] = deleteOnTermination } else if bdm.Ebs != nil && bdm.Ebs.DeleteOnTermination != nil { - bd["delete_on_termination"] = *bdm.Ebs.DeleteOnTermination + bd["delete_on_termination"] = aws.BoolValue(bdm.Ebs.DeleteOnTermination) } if bdm.Ebs != nil && bdm.Ebs.VolumeSize != nil { - bd["volume_size"] = *bdm.Ebs.VolumeSize + bd["volume_size"] = aws.Int64Value(bdm.Ebs.VolumeSize) } if bdm.Ebs != nil && bdm.Ebs.VolumeType != nil { - bd["volume_type"] = *bdm.Ebs.VolumeType + bd["volume_type"] = aws.StringValue(bdm.Ebs.VolumeType) } if bdm.Ebs != nil && bdm.Ebs.Iops != nil { - bd["iops"] = *bdm.Ebs.Iops + bd["iops"] = aws.Int64Value(bdm.Ebs.Iops) } if bdm.Ebs != nil && bdm.Ebs.Throughput != nil { - bd["throughput"] = *bdm.Ebs.Throughput + 
bd["throughput"] = aws.Int64Value(bdm.Ebs.Throughput) } if bdm.Ebs != nil && bdm.Ebs.Encrypted != nil { - bd["encrypted"] = *bdm.Ebs.Encrypted + bd["encrypted"] = aws.BoolValue(bdm.Ebs.Encrypted) } - if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName { + if bdm.DeviceName != nil && aws.StringValue(bdm.DeviceName) == rootDeviceName { blockDevices["root"] = bd } else { if bdm.DeviceName != nil { - bd["device_name"] = *bdm.DeviceName + bd["device_name"] = aws.StringValue(bdm.DeviceName) } if bdm.VirtualName != nil { - bd["virtual_name"] = *bdm.VirtualName + bd["virtual_name"] = aws.StringValue(bdm.VirtualName) blockDevices["ephemeral"] = append(blockDevices["ephemeral"].([]map[string]interface{}), bd) } else { if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { - bd["snapshot_id"] = *bdm.Ebs.SnapshotId + bd["snapshot_id"] = aws.StringValue(bdm.Ebs.SnapshotId) } if bdm.NoDevice != nil { - bd["no_device"] = *bdm.NoDevice + bd["no_device"] = aws.BoolValue(bdm.NoDevice) } blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) } diff --git a/internal/service/autoscaling/sweep.go b/internal/service/autoscaling/sweep.go index de2f1f74f84b..6cacca292045 100644 --- a/internal/service/autoscaling/sweep.go +++ b/internal/service/autoscaling/sweep.go @@ -104,7 +104,7 @@ func sweepLaunchConfigurations(region string) error { } for _, lc := range resp.LaunchConfigurations { - name := *lc.LaunchConfigurationName + name := aws.StringValue(lc.LaunchConfigurationName) log.Printf("[INFO] Deleting Launch Configuration: %s", name) _, err := conn.DeleteLaunchConfiguration( diff --git a/internal/service/cloudformation/flex.go b/internal/service/cloudformation/flex.go index 4cc6bdc71190..e0a40388ae9f 100644 --- a/internal/service/cloudformation/flex.go +++ b/internal/service/cloudformation/flex.go @@ -20,7 +20,7 @@ func expandParameters(params map[string]interface{}) []*cloudformation.Parameter func flattenAllCloudFormationParameters(cfParams []*cloudformation.Parameter) map[string]interface{} { params := make(map[string]interface{}, len(cfParams)) for _, p := range cfParams { - params[*p.ParameterKey] = *p.ParameterValue + params[aws.StringValue(p.ParameterKey)] = aws.StringValue(p.ParameterValue) } return params } @@ -28,7 +28,7 @@ func flattenAllCloudFormationParameters(cfParams []*cloudformation.Parameter) ma func flattenOutputs(cfOutputs []*cloudformation.Output) map[string]string { outputs := make(map[string]string, len(cfOutputs)) for _, o := range cfOutputs { - outputs[*o.OutputKey] = *o.OutputValue + outputs[aws.StringValue(o.OutputKey)] = aws.StringValue(o.OutputValue) } return outputs } @@ -40,9 +40,9 @@ func flattenParameters(cfParams []*cloudformation.Parameter, originalParams map[string]interface{}) map[string]interface{} { params := make(map[string]interface{}, len(cfParams)) for _, p := range cfParams { - _, isConfigured := originalParams[*p.ParameterKey] + _, isConfigured := originalParams[aws.StringValue(p.ParameterKey)] if isConfigured { - params[*p.ParameterKey] = *p.ParameterValue + params[aws.StringValue(p.ParameterKey)] = aws.StringValue(p.ParameterValue) } } return params diff --git a/internal/service/cloudfront/distribution_configuration_structure.go b/internal/service/cloudfront/distribution_configuration_structure.go index 14b8f1ea34b9..778baeedaa0c 100644 --- a/internal/service/cloudfront/distribution_configuration_structure.go +++ b/internal/service/cloudfront/distribution_configuration_structure.go @@ -386,7 +386,7 @@ func flattenCacheBehavior(cb 
*cloudfront.CacheBehavior) map[string]interface{} { m["max_ttl"] = int(*cb.MaxTTL) } if cb.SmoothStreaming != nil { - m["smooth_streaming"] = *cb.SmoothStreaming + m["smooth_streaming"] = aws.BoolValue(cb.SmoothStreaming) } if cb.DefaultTTL != nil { m["default_ttl"] = int(*cb.DefaultTTL) @@ -398,7 +398,7 @@ func flattenCacheBehavior(cb *cloudfront.CacheBehavior) map[string]interface{} { m["cached_methods"] = FlattenCachedMethods(cb.AllowedMethods.CachedMethods) } if cb.PathPattern != nil { - m["path_pattern"] = *cb.PathPattern + m["path_pattern"] = aws.StringValue(cb.PathPattern) } return m } @@ -1154,7 +1154,7 @@ func FlattenCustomErrorResponse(er *cloudfront.CustomErrorResponse) map[string]i m["response_code"], _ = strconv.Atoi(*er.ResponseCode) } if er.ResponsePagePath != nil { - m["response_page_path"] = *er.ResponsePagePath + m["response_page_path"] = aws.StringValue(er.ResponsePagePath) } return m } @@ -1285,18 +1285,18 @@ func flattenViewerCertificate(vc *cloudfront.ViewerCertificate) []interface{} { m := make(map[string]interface{}) if vc.IAMCertificateId != nil { - m["iam_certificate_id"] = *vc.IAMCertificateId - m["ssl_support_method"] = *vc.SSLSupportMethod + m["iam_certificate_id"] = aws.StringValue(vc.IAMCertificateId) + m["ssl_support_method"] = aws.StringValue(vc.SSLSupportMethod) } if vc.ACMCertificateArn != nil { - m["acm_certificate_arn"] = *vc.ACMCertificateArn - m["ssl_support_method"] = *vc.SSLSupportMethod + m["acm_certificate_arn"] = aws.StringValue(vc.ACMCertificateArn) + m["ssl_support_method"] = aws.StringValue(vc.SSLSupportMethod) } if vc.CloudFrontDefaultCertificate != nil { - m["cloudfront_default_certificate"] = *vc.CloudFrontDefaultCertificate + m["cloudfront_default_certificate"] = aws.BoolValue(vc.CloudFrontDefaultCertificate) } if vc.MinimumProtocolVersion != nil { - m["minimum_protocol_version"] = *vc.MinimumProtocolVersion + m["minimum_protocol_version"] = aws.StringValue(vc.MinimumProtocolVersion) } return []interface{}{m} } diff --git a/internal/service/cognitoidentity/flex.go b/internal/service/cognitoidentity/flex.go index 17f05119265e..0226be8a6d69 100644 --- a/internal/service/cognitoidentity/flex.go +++ b/internal/service/cognitoidentity/flex.go @@ -110,11 +110,11 @@ func flattenIdentityPoolRoleMappingsAttachment(rms map[string]*cognitoidentity.R } if v.Type != nil { - m["type"] = *v.Type + m["type"] = aws.StringValue(v.Type) } if v.AmbiguousRoleResolution != nil { - m["ambiguous_role_resolution"] = *v.AmbiguousRoleResolution + m["ambiguous_role_resolution"] = aws.StringValue(v.AmbiguousRoleResolution) } if v.RulesConfiguration != nil && v.RulesConfiguration.Rules != nil { @@ -128,23 +128,15 @@ func flattenIdentityPoolRoleMappingsAttachment(rms map[string]*cognitoidentity.R return roleMappings } -func flattenIdentityPoolRoles(config map[string]*string) map[string]string { - m := map[string]string{} - for k, v := range config { - m[k] = *v - } - return m -} - func flattenIdentityPoolRolesAttachmentMappingRules(d []*cognitoidentity.MappingRule) []interface{} { rules := make([]interface{}, 0) for _, rule := range d { r := make(map[string]interface{}) - r["claim"] = *rule.Claim - r["match_type"] = *rule.MatchType - r["role_arn"] = *rule.RoleARN - r["value"] = *rule.Value + r["claim"] = aws.StringValue(rule.Claim) + r["match_type"] = aws.StringValue(rule.MatchType) + r["role_arn"] = aws.StringValue(rule.RoleARN) + r["value"] = aws.StringValue(rule.Value) rules = append(rules, r) } @@ -163,15 +155,15 @@ func flattenIdentityProviders(ips 
[]*cognitoidentity.Provider) []map[string]inte } if v.ClientId != nil { - ip["client_id"] = *v.ClientId + ip["client_id"] = aws.StringValue(v.ClientId) } if v.ProviderName != nil { - ip["provider_name"] = *v.ProviderName + ip["provider_name"] = aws.StringValue(v.ProviderName) } if v.ServerSideTokenCheck != nil { - ip["server_side_token_check"] = *v.ServerSideTokenCheck + ip["server_side_token_check"] = aws.BoolValue(v.ServerSideTokenCheck) } values = append(values, ip) @@ -179,11 +171,3 @@ func flattenIdentityProviders(ips []*cognitoidentity.Provider) []map[string]inte return values } - -func flattenSupportedLoginProviders(config map[string]*string) map[string]string { - m := map[string]string{} - for k, v := range config { - m[k] = *v - } - return m -} diff --git a/internal/service/cognitoidentity/pool.go b/internal/service/cognitoidentity/pool.go index 9ac03e064889..910f2ab3aac7 100644 --- a/internal/service/cognitoidentity/pool.go +++ b/internal/service/cognitoidentity/pool.go @@ -204,19 +204,19 @@ func resourcePoolRead(d *schema.ResourceData, meta interface{}) error { } if err := d.Set("cognito_identity_providers", flattenIdentityProviders(ip.CognitoIdentityProviders)); err != nil { - return fmt.Errorf("Error setting cognito_identity_providers error: %#v", err) + return fmt.Errorf("Error setting cognito_identity_providers error: %w", err) } if err := d.Set("openid_connect_provider_arns", flex.FlattenStringList(ip.OpenIdConnectProviderARNs)); err != nil { - return fmt.Errorf("Error setting openid_connect_provider_arns error: %#v", err) + return fmt.Errorf("Error setting openid_connect_provider_arns error: %w", err) } if err := d.Set("saml_provider_arns", flex.FlattenStringList(ip.SamlProviderARNs)); err != nil { - return fmt.Errorf("Error setting saml_provider_arns error: %#v", err) + return fmt.Errorf("Error setting saml_provider_arns error: %w", err) } - if err := d.Set("supported_login_providers", flattenSupportedLoginProviders(ip.SupportedLoginProviders)); err != nil { - return fmt.Errorf("Error setting supported_login_providers error: %#v", err) + if err := d.Set("supported_login_providers", aws.StringValueMap(ip.SupportedLoginProviders)); err != nil { + return fmt.Errorf("Error setting supported_login_providers error: %w", err) } return nil diff --git a/internal/service/cognitoidentity/pool_roles_attachment.go b/internal/service/cognitoidentity/pool_roles_attachment.go index 239fde994e30..943338a01cb8 100644 --- a/internal/service/cognitoidentity/pool_roles_attachment.go +++ b/internal/service/cognitoidentity/pool_roles_attachment.go @@ -159,7 +159,7 @@ func resourcePoolRolesAttachmentRead(d *schema.ResourceData, meta interface{}) e d.Set("identity_pool_id", ip.IdentityPoolId) - if err := d.Set("roles", flattenIdentityPoolRoles(ip.Roles)); err != nil { + if err := d.Set("roles", aws.StringValueMap(ip.Roles)); err != nil { return fmt.Errorf("Error setting roles error: %#v", err) } diff --git a/internal/service/configservice/flex.go b/internal/service/configservice/flex.go index f241d1b739ca..d5f3c6db9c56 100644 --- a/internal/service/configservice/flex.go +++ b/internal/service/configservice/flex.go @@ -164,11 +164,11 @@ func flattenRecordingGroup(g *configservice.RecordingGroup) []map[string]interfa m := make(map[string]interface{}, 1) if g.AllSupported != nil { - m["all_supported"] = *g.AllSupported + m["all_supported"] = aws.BoolValue(g.AllSupported) } if g.IncludeGlobalResourceTypes != nil { - m["include_global_resource_types"] = *g.IncludeGlobalResourceTypes + 
m["include_global_resource_types"] = aws.BoolValue(g.IncludeGlobalResourceTypes) } if g.ResourceTypes != nil && len(g.ResourceTypes) > 0 { @@ -183,16 +183,16 @@ func flattenRuleScope(scope *configservice.Scope) []interface{} { m := make(map[string]interface{}) if scope.ComplianceResourceId != nil { - m["compliance_resource_id"] = *scope.ComplianceResourceId + m["compliance_resource_id"] = aws.StringValue(scope.ComplianceResourceId) } if scope.ComplianceResourceTypes != nil { m["compliance_resource_types"] = flex.FlattenStringSet(scope.ComplianceResourceTypes) } if scope.TagKey != nil { - m["tag_key"] = *scope.TagKey + m["tag_key"] = aws.StringValue(scope.TagKey) } if scope.TagValue != nil { - m["tag_value"] = *scope.TagValue + m["tag_value"] = aws.StringValue(scope.TagValue) } items = append(items, m) @@ -202,8 +202,8 @@ func flattenRuleScope(scope *configservice.Scope) []interface{} { func flattenRuleSource(source *configservice.Source) []interface{} { var result []interface{} m := make(map[string]interface{}) - m["owner"] = *source.Owner - m["source_identifier"] = *source.SourceIdentifier + m["owner"] = aws.StringValue(source.Owner) + m["source_identifier"] = aws.StringValue(source.SourceIdentifier) if len(source.SourceDetails) > 0 { m["source_detail"] = schema.NewSet(ruleSourceDetailsHash, flattenRuleSourceDetails(source.SourceDetails)) } @@ -216,13 +216,13 @@ func flattenRuleSourceDetails(details []*configservice.SourceDetail) []interface for _, d := range details { m := make(map[string]interface{}) if d.MessageType != nil { - m["message_type"] = *d.MessageType + m["message_type"] = aws.StringValue(d.MessageType) } if d.EventSource != nil { - m["event_source"] = *d.EventSource + m["event_source"] = aws.StringValue(d.EventSource) } if d.MaximumExecutionFrequency != nil { - m["maximum_execution_frequency"] = *d.MaximumExecutionFrequency + m["maximum_execution_frequency"] = aws.StringValue(d.MaximumExecutionFrequency) } items = append(items, m) @@ -235,7 +235,7 @@ func flattenSnapshotDeliveryProperties(p *configservice.ConfigSnapshotDeliveryPr m := make(map[string]interface{}) if p.DeliveryFrequency != nil { - m["delivery_frequency"] = *p.DeliveryFrequency + m["delivery_frequency"] = aws.StringValue(p.DeliveryFrequency) } return []map[string]interface{}{m} diff --git a/internal/service/dax/cluster.go b/internal/service/dax/cluster.go index 4b8fc72c290b..10fafec2a77c 100644 --- a/internal/service/dax/cluster.go +++ b/internal/service/dax/cluster.go @@ -584,7 +584,7 @@ func daxClusterStateRefreshFunc(conn *dax.DAX, clusterID, givenState string, pen // return the current state if it's in the pending array for _, p := range pending { log.Printf("[DEBUG] DAX: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.Status) - s := *c.Status + s := aws.StringValue(c.Status) if p == s { log.Printf("[DEBUG] Return with status: %v", *c.Status) return c, p, nil diff --git a/internal/service/dynamodb/global_table.go b/internal/service/dynamodb/global_table.go index 45186302404f..ce9848f2f422 100644 --- a/internal/service/dynamodb/global_table.go +++ b/internal/service/dynamodb/global_table.go @@ -310,6 +310,6 @@ func flattenReplicas(replicaDescriptions []*dynamodb.ReplicaDescription) []inter func flattenReplica(replicaDescription *dynamodb.ReplicaDescription) map[string]interface{} { replica := make(map[string]interface{}) - replica["region_name"] = *replicaDescription.RegionName + replica["region_name"] = aws.StringValue(replicaDescription.RegionName) return replica 
} diff --git a/internal/service/ecr/repository.go b/internal/service/ecr/repository.go index 3518fd180c9f..225606304fe3 100644 --- a/internal/service/ecr/repository.go +++ b/internal/service/ecr/repository.go @@ -135,7 +135,7 @@ func resourceRepositoryCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error creating ECR repository: %s", err) } - repository := *out.Repository + repository := *out.Repository // nosemgrep: prefer-aws-go-sdk-pointer-conversion-assignment // false positive log.Printf("[DEBUG] ECR repository created: %q", *repository.RepositoryArn) diff --git a/internal/service/ecs/flex.go b/internal/service/ecs/flex.go index d0654fee5a64..68f5ca395c71 100644 --- a/internal/service/ecs/flex.go +++ b/internal/service/ecs/flex.go @@ -43,11 +43,11 @@ func flattenECSLoadBalancers(list []*ecs.LoadBalancer) []map[string]interface{} } if loadBalancer.LoadBalancerName != nil { - l["elb_name"] = *loadBalancer.LoadBalancerName + l["elb_name"] = aws.StringValue(loadBalancer.LoadBalancerName) } if loadBalancer.TargetGroupArn != nil { - l["target_group_arn"] = *loadBalancer.TargetGroupArn + l["target_group_arn"] = aws.StringValue(loadBalancer.TargetGroupArn) } result = append(result, l) diff --git a/internal/service/ecs/task_definition.go b/internal/service/ecs/task_definition.go index ca634c7df69b..2e0c96602a8e 100644 --- a/internal/service/ecs/task_definition.go +++ b/internal/service/ecs/task_definition.go @@ -500,7 +500,7 @@ func resourceTaskDefinitionCreate(d *schema.ResourceData, meta interface{}) erro return err } - taskDefinition := *out.TaskDefinition + taskDefinition := *out.TaskDefinition // nosemgrep: prefer-aws-go-sdk-pointer-conversion-assignment // false positive log.Printf("[DEBUG] ECS task definition registered: %q (rev. 
%d)", aws.StringValue(taskDefinition.TaskDefinitionArn), aws.Int64Value(taskDefinition.Revision)) diff --git a/internal/service/ecs/task_definition_test.go b/internal/service/ecs/task_definition_test.go index f5cb5f8b3097..79ae3df0a4d3 100644 --- a/internal/service/ecs/task_definition_test.go +++ b/internal/service/ecs/task_definition_test.go @@ -945,7 +945,7 @@ func testAccCheckTaskDefinitionProxyConfiguration(after *ecs.TaskDefinition, con propertyLookups := make(map[string]string) for _, property := range properties { - propertyLookups[*property.Name] = *property.Value + propertyLookups[aws.StringValue(property.Name)] = aws.StringValue(property.Value) } if propertyLookups["IgnoredUID"] != ignoredUid { diff --git a/internal/service/elasticache/replication_group.go b/internal/service/elasticache/replication_group.go index 1047fd6ce7ab..1a0d3f8d9f5f 100644 --- a/internal/service/elasticache/replication_group.go +++ b/internal/service/elasticache/replication_group.go @@ -538,7 +538,7 @@ func resourceReplicationGroupRead(d *schema.ResourceData, meta interface{}) erro return nil } - cacheCluster := *rgp.NodeGroups[0].NodeGroupMembers[0] + cacheCluster := *rgp.NodeGroups[0].NodeGroupMembers[0] // nosemgrep: prefer-aws-go-sdk-pointer-conversion-assignment // false positive res, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ CacheClusterId: cacheCluster.CacheClusterId, diff --git a/internal/service/elasticache/subnet_group.go b/internal/service/elasticache/subnet_group.go index 3179a988067f..5a80f683cbc8 100644 --- a/internal/service/elasticache/subnet_group.go +++ b/internal/service/elasticache/subnet_group.go @@ -148,7 +148,7 @@ func resourceSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { ids := make([]string, len(group.Subnets)) for i, s := range group.Subnets { - ids[i] = *s.SubnetIdentifier + ids[i] = aws.StringValue(s.SubnetIdentifier) } d.Set("arn", group.ARN) diff --git a/internal/service/glue/resource_policy_test.go b/internal/service/glue/resource_policy_test.go index 36622cb98e9f..d01a4ce5c855 100644 --- a/internal/service/glue/resource_policy_test.go +++ b/internal/service/glue/resource_policy_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -209,7 +210,7 @@ func testAccResourcePolicy(n string, action string) resource.TestCheckFunc { return fmt.Errorf("Get resource policy error: %v", err) } - actualPolicyText := *policy.PolicyInJson + actualPolicyText := aws.StringValue(policy.PolicyInJson) expectedPolicy := CreateTablePolicy(action) equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicy) diff --git a/internal/service/kms/key_test.go b/internal/service/kms/key_test.go index 2620adef92b9..3522deb2bf4a 100644 --- a/internal/service/kms/key_test.go +++ b/internal/service/kms/key_test.go @@ -384,7 +384,7 @@ func testAccCheckKeyHasPolicy(name string, expectedPolicyText string) resource.T return err } - actualPolicyText := *out.Policy + actualPolicyText := aws.StringValue(out.Policy) equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) if err != nil { diff --git a/internal/service/mq/broker.go b/internal/service/mq/broker.go index 91d44ef0e010..807b932d1f74 100644 --- a/internal/service/mq/broker.go +++ b/internal/service/mq/broker.go @@ -875,13 +875,13 @@ func flattenMqWeeklyStartTime(wst 
*mq.WeeklyStartTime) []interface{} { } m := make(map[string]interface{}) if wst.DayOfWeek != nil { - m["day_of_week"] = *wst.DayOfWeek + m["day_of_week"] = aws.StringValue(wst.DayOfWeek) } if wst.TimeOfDay != nil { - m["time_of_day"] = *wst.TimeOfDay + m["time_of_day"] = aws.StringValue(wst.TimeOfDay) } if wst.TimeZone != nil { - m["time_zone"] = *wst.TimeZone + m["time_zone"] = aws.StringValue(wst.TimeZone) } return []interface{}{m} } diff --git a/internal/service/organizations/account.go b/internal/service/organizations/account.go index 42f49c6460c3..5b6aaf8e93e7 100644 --- a/internal/service/organizations/account.go +++ b/internal/service/organizations/account.go @@ -136,7 +136,7 @@ func resourceAccountCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error creating account: %s", err) } - requestId := *resp.CreateAccountStatus.Id + requestId := aws.StringValue(resp.CreateAccountStatus.Id) // Wait for the account to become available log.Printf("[DEBUG] Waiting for account request (%s) to succeed", requestId) diff --git a/internal/service/serverlessrepo/cloudformation_stack.go b/internal/service/serverlessrepo/cloudformation_stack.go index 3f1544c049ea..deededd3f371 100644 --- a/internal/service/serverlessrepo/cloudformation_stack.go +++ b/internal/service/serverlessrepo/cloudformation_stack.go @@ -352,7 +352,7 @@ func flattenServerlessRepositoryStackCapabilities(stackCapabilities []*string, a func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string { outputs := make(map[string]string, len(cfOutputs)) for _, o := range cfOutputs { - outputs[*o.OutputKey] = *o.OutputValue + outputs[aws.StringValue(o.OutputKey)] = aws.StringValue(o.OutputValue) } return outputs } diff --git a/internal/service/ses/identity_notification_topic_test.go b/internal/service/ses/identity_notification_topic_test.go index 4e77c30c3611..0fd8938ec92e 100644 --- a/internal/service/ses/identity_notification_topic_test.go +++ b/internal/service/ses/identity_notification_topic_test.go @@ -119,11 +119,11 @@ func testAccCheckIdentityNotificationTopicExists(n string) resource.TestCheckFun var headersIncluded bool switch notificationType { case ses.NotificationTypeBounce: - headersIncluded = *response.NotificationAttributes[identity].HeadersInBounceNotificationsEnabled + headersIncluded = aws.BoolValue(response.NotificationAttributes[identity].HeadersInBounceNotificationsEnabled) case ses.NotificationTypeComplaint: - headersIncluded = *response.NotificationAttributes[identity].HeadersInComplaintNotificationsEnabled + headersIncluded = aws.BoolValue(response.NotificationAttributes[identity].HeadersInComplaintNotificationsEnabled) case ses.NotificationTypeDelivery: - headersIncluded = *response.NotificationAttributes[identity].HeadersInDeliveryNotificationsEnabled + headersIncluded = aws.BoolValue(response.NotificationAttributes[identity].HeadersInDeliveryNotificationsEnabled) } if headersIncluded != headersExpected { diff --git a/internal/service/signer/signing_profile_permission.go b/internal/service/signer/signing_profile_permission.go index dfac82f58320..a87abecbeaec 100644 --- a/internal/service/signer/signing_profile_permission.go +++ b/internal/service/signer/signing_profile_permission.go @@ -97,7 +97,7 @@ func resourceSigningProfilePermissionCreate(d *schema.ResourceData, meta interfa return err } } else { - revisionId = *getProfilePermissionsOutput.RevisionId + revisionId = aws.StringValue(getProfilePermissionsOutput.RevisionId) } statementId := 
create.Name(d.Get("statement_id").(string), d.Get("statement_id_prefix").(string)) diff --git a/internal/service/sns/topic_test.go b/internal/service/sns/topic_test.go index db2039168e8e..773ec2df5dca 100644 --- a/internal/service/sns/topic_test.go +++ b/internal/service/sns/topic_test.go @@ -528,7 +528,7 @@ func testAccCheckNSTopicHasPolicy(n string, expectedPolicyText string) resource. var actualPolicyText string for k, v := range resp.Attributes { if k == "Policy" { - actualPolicyText = *v + actualPolicyText = aws.StringValue(v) break } } @@ -570,7 +570,7 @@ func testAccCheckNSTopicHasDeliveryPolicy(n string, expectedPolicyText string) r var actualPolicyText string for k, v := range resp.Attributes { if k == "DeliveryPolicy" { - actualPolicyText = *v + actualPolicyText = aws.StringValue(v) break } } @@ -634,7 +634,7 @@ func testAccCheckTopicExists(n string, attributes map[string]string) resource.Te } for k, v := range out.Attributes { - attributes[k] = *v + attributes[k] = aws.StringValue(v) } return nil diff --git a/internal/service/swf/domain.go b/internal/service/swf/domain.go index 0b09fe7b638f..a9766d1aca1d 100644 --- a/internal/service/swf/domain.go +++ b/internal/service/swf/domain.go @@ -129,7 +129,7 @@ func resourceDomainRead(d *schema.ResourceData, meta interface{}) error { return nil } - arn := *resp.DomainInfo.Arn + arn := aws.StringValue(resp.DomainInfo.Arn) tags, err := ListTags(conn, arn) if err != nil { diff --git a/internal/service/waf/byte_match_set.go b/internal/service/waf/byte_match_set.go index 6147e168e5f2..160ed207ff13 100644 --- a/internal/service/waf/byte_match_set.go +++ b/internal/service/waf/byte_match_set.go @@ -219,7 +219,7 @@ func diffWafByteMatchSetTuples(oldT, newT []interface{}) []*waf.ByteMatchSetUpda updates = append(updates, &waf.ByteMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), TargetString: []byte(tuple["target_string"].(string)), TextTransformation: aws.String(tuple["text_transformation"].(string)), @@ -233,7 +233,7 @@ func diffWafByteMatchSetTuples(oldT, newT []interface{}) []*waf.ByteMatchSetUpda updates = append(updates, &waf.ByteMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), TargetString: []byte(tuple["target_string"].(string)), TextTransformation: aws.String(tuple["text_transformation"].(string)), diff --git a/internal/service/waf/flex.go b/internal/service/waf/flex.go index b840c5144e65..f6fae2f577e9 100644 --- a/internal/service/waf/flex.go +++ b/internal/service/waf/flex.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/service/waf" ) -func expandAction(l []interface{}) *waf.WafAction { +func ExpandAction(l []interface{}) *waf.WafAction { if len(l) == 0 || l[0] == nil { return nil } @@ -17,7 +17,7 @@ func expandAction(l []interface{}) *waf.WafAction { } } -func expandOverrideAction(l []interface{}) *waf.WafOverrideAction { +func ExpandOverrideAction(l []interface{}) 
*waf.WafOverrideAction { if len(l) == 0 || l[0] == nil { return nil } @@ -29,20 +29,20 @@ func expandOverrideAction(l []interface{}) *waf.WafOverrideAction { } } -func expandWebACLUpdate(updateAction string, aclRule map[string]interface{}) *waf.WebACLUpdate { +func ExpandWebACLUpdate(updateAction string, aclRule map[string]interface{}) *waf.WebACLUpdate { var rule *waf.ActivatedRule switch aclRule["type"].(string) { case waf.WafRuleTypeGroup: rule = &waf.ActivatedRule{ - OverrideAction: expandOverrideAction(aclRule["override_action"].([]interface{})), + OverrideAction: ExpandOverrideAction(aclRule["override_action"].([]interface{})), Priority: aws.Int64(int64(aclRule["priority"].(int))), RuleId: aws.String(aclRule["rule_id"].(string)), Type: aws.String(aclRule["type"].(string)), } default: rule = &waf.ActivatedRule{ - Action: expandAction(aclRule["action"].([]interface{})), + Action: ExpandAction(aclRule["action"].([]interface{})), Priority: aws.Int64(int64(aclRule["priority"].(int))), RuleId: aws.String(aclRule["rule_id"].(string)), Type: aws.String(aclRule["type"].(string)), @@ -57,7 +57,7 @@ func expandWebACLUpdate(updateAction string, aclRule map[string]interface{}) *wa return update } -func flattenAction(n *waf.WafAction) []map[string]interface{} { +func FlattenAction(n *waf.WafAction) []map[string]interface{} { if n == nil { return nil } @@ -69,7 +69,7 @@ func flattenAction(n *waf.WafAction) []map[string]interface{} { return []map[string]interface{}{result} } -func flattenWebACLRules(ts []*waf.ActivatedRule) []map[string]interface{} { +func FlattenWebACLRules(ts []*waf.ActivatedRule) []map[string]interface{} { out := make([]map[string]interface{}, len(ts)) for i, r := range ts { m := make(map[string]interface{}) @@ -95,7 +95,7 @@ func flattenWebACLRules(ts []*waf.ActivatedRule) []map[string]interface{} { return out } -func expandFieldToMatch(d map[string]interface{}) *waf.FieldToMatch { +func ExpandFieldToMatch(d map[string]interface{}) *waf.FieldToMatch { ftm := &waf.FieldToMatch{ Type: aws.String(d["type"].(string)), } @@ -108,10 +108,10 @@ func expandFieldToMatch(d map[string]interface{}) *waf.FieldToMatch { func FlattenFieldToMatch(fm *waf.FieldToMatch) []interface{} { m := make(map[string]interface{}) if fm.Data != nil { - m["data"] = *fm.Data + m["data"] = aws.StringValue(fm.Data) } if fm.Type != nil { - m["type"] = *fm.Type + m["type"] = aws.StringValue(fm.Type) } return []interface{}{m} } diff --git a/internal/service/waf/geo_match_set.go b/internal/service/waf/geo_match_set.go index f3331556fee3..814ebb2ed273 100644 --- a/internal/service/waf/geo_match_set.go +++ b/internal/service/waf/geo_match_set.go @@ -95,7 +95,7 @@ func resourceGeoMatchSetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("name", resp.GeoMatchSet.Name) - d.Set("geo_match_constraint", flattenWafGeoMatchConstraint(resp.GeoMatchSet.GeoMatchConstraints)) + d.Set("geo_match_constraint", FlattenGeoMatchConstraint(resp.GeoMatchSet.GeoMatchConstraints)) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, @@ -158,7 +158,7 @@ func updateGeoMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WA req := &waf.UpdateGeoMatchSetInput{ ChangeToken: token, GeoMatchSetId: aws.String(id), - Updates: diffWafGeoMatchSetConstraints(oldT, newT), + Updates: DiffGeoMatchSetConstraints(oldT, newT), } log.Printf("[INFO] Updating GeoMatchSet constraints: %s", req) diff --git a/internal/service/waf/helpers.go b/internal/service/waf/helpers.go index f44272042ee7..f8a7721870aa 100644 --- 
a/internal/service/waf/helpers.go +++ b/internal/service/waf/helpers.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -func wafSizeConstraintSetSchema() map[string]*schema.Schema { +func SizeConstraintSetSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -63,7 +63,7 @@ func wafSizeConstraintSetSchema() map[string]*schema.Schema { } } -func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUpdate { +func DiffSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUpdate { updates := make([]*waf.SizeConstraintSetUpdate, 0) for _, os := range oldS { @@ -77,7 +77,7 @@ func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUp updates = append(updates, &waf.SizeConstraintSetUpdate{ Action: aws.String(waf.ChangeActionDelete), SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(constraint["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(constraint["field_to_match"].([]interface{})[0].(map[string]interface{})), ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), Size: aws.Int64(int64(constraint["size"].(int))), TextTransformation: aws.String(constraint["text_transformation"].(string)), @@ -91,7 +91,7 @@ func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUp updates = append(updates, &waf.SizeConstraintSetUpdate{ Action: aws.String(waf.ChangeActionInsert), SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(constraint["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(constraint["field_to_match"].([]interface{})[0].(map[string]interface{})), ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), Size: aws.Int64(int64(constraint["size"].(int))), TextTransformation: aws.String(constraint["text_transformation"].(string)), @@ -101,33 +101,33 @@ func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUp return updates } -func flattenWafSizeConstraints(sc []*waf.SizeConstraint) []interface{} { +func FlattenSizeConstraints(sc []*waf.SizeConstraint) []interface{} { out := make([]interface{}, len(sc)) for i, c := range sc { m := make(map[string]interface{}) - m["comparison_operator"] = *c.ComparisonOperator + m["comparison_operator"] = aws.StringValue(c.ComparisonOperator) if c.FieldToMatch != nil { m["field_to_match"] = FlattenFieldToMatch(c.FieldToMatch) } - m["size"] = *c.Size - m["text_transformation"] = *c.TextTransformation + m["size"] = aws.Int64Value(c.Size) + m["text_transformation"] = aws.StringValue(c.TextTransformation) out[i] = m } return out } -func flattenWafGeoMatchConstraint(ts []*waf.GeoMatchConstraint) []interface{} { +func FlattenGeoMatchConstraint(ts []*waf.GeoMatchConstraint) []interface{} { out := make([]interface{}, len(ts)) for i, t := range ts { m := make(map[string]interface{}) - m["type"] = *t.Type - m["value"] = *t.Value + m["type"] = aws.StringValue(t.Type) + m["value"] = aws.StringValue(t.Value) out[i] = m } return out } -func diffWafGeoMatchSetConstraints(oldT, newT []interface{}) []*waf.GeoMatchSetUpdate { +func DiffGeoMatchSetConstraints(oldT, newT []interface{}) []*waf.GeoMatchSetUpdate { updates := make([]*waf.GeoMatchSetUpdate, 0) for _, od := range oldT { @@ -161,7 +161,7 @@ func diffWafGeoMatchSetConstraints(oldT, newT []interface{}) []*waf.GeoMatchSetU return updates } -func 
diffWafRegexPatternSetPatternStrings(oldPatterns, newPatterns []interface{}) []*waf.RegexPatternSetUpdate {
+func DiffRegexPatternSetPatternStrings(oldPatterns, newPatterns []interface{}) []*waf.RegexPatternSetUpdate {
 	updates := make([]*waf.RegexPatternSetUpdate, 0)
 
 	for _, op := range oldPatterns {
@@ -185,7 +185,7 @@ func diffWafRegexPatternSetPatternStrings(oldPatterns, newPatterns []interface{}
 	return updates
 }
 
-func diffWafRulePredicates(oldP, newP []interface{}) []*waf.RuleUpdate {
+func DiffRulePredicates(oldP, newP []interface{}) []*waf.RuleUpdate {
 	updates := make([]*waf.RuleUpdate, 0)
 
 	for _, op := range oldP {
@@ -221,7 +221,7 @@ func diffWafRulePredicates(oldP, newP []interface{}) []*waf.RuleUpdate {
 	return updates
 }
 
-func diffWafRuleGroupActivatedRules(oldRules, newRules []interface{}) []*waf.RuleGroupUpdate {
+func DiffRuleGroupActivatedRules(oldRules, newRules []interface{}) []*waf.RuleGroupUpdate {
 	updates := make([]*waf.RuleGroupUpdate, 0)
 
 	for _, op := range oldRules {
@@ -234,7 +234,7 @@ func diffWafRuleGroupActivatedRules(oldRules, newRules []interface{}) []*waf.Rul
 
 		updates = append(updates, &waf.RuleGroupUpdate{
 			Action:        aws.String(waf.ChangeActionDelete),
-			ActivatedRule: expandWafActivatedRule(rule),
+			ActivatedRule: ExpandActivatedRule(rule),
 		})
 	}
 
@@ -243,24 +243,24 @@ func diffWafRuleGroupActivatedRules(oldRules, newRules []interface{}) []*waf.Rul
 
 		updates = append(updates, &waf.RuleGroupUpdate{
 			Action:        aws.String(waf.ChangeActionInsert),
-			ActivatedRule: expandWafActivatedRule(rule),
+			ActivatedRule: ExpandActivatedRule(rule),
 		})
 	}
 	return updates
 }
 
-func flattenWafActivatedRules(activatedRules []*waf.ActivatedRule) []interface{} {
+func FlattenActivatedRules(activatedRules []*waf.ActivatedRule) []interface{} {
 	out := make([]interface{}, len(activatedRules))
 	for i, ar := range activatedRules {
 		rule := map[string]interface{}{
-			"priority": int(*ar.Priority),
-			"rule_id":  *ar.RuleId,
-			"type":     *ar.Type,
+			"priority": int(aws.Int64Value(ar.Priority)),
+			"rule_id":  aws.StringValue(ar.RuleId),
+			"type":     aws.StringValue(ar.Type),
 		}
 		if ar.Action != nil {
 			rule["action"] = []interface{}{
 				map[string]interface{}{
-					"type": *ar.Action.Type,
+					"type": aws.StringValue(ar.Action.Type),
 				},
 			}
 		}
@@ -269,9 +269,9 @@ func flattenWafActivatedRules(activatedRules []*waf.ActivatedRule) []interface{}
 	return out
 }
 
-func expandWafActivatedRule(rule map[string]interface{}) *waf.ActivatedRule {
+func ExpandActivatedRule(rule map[string]interface{}) *waf.ActivatedRule {
 	r := &waf.ActivatedRule{
 		Priority: aws.Int64(int64(rule["priority"].(int))),
 		RuleId:   aws.String(rule["rule_id"].(string)),
 		Type:     aws.String(rule["type"].(string)),
 	}
@@ -285,7 +285,7 @@ func expandWafActivatedRule(rule map[string]interface{}) *waf.ActivatedRule {
 	return r
 }
 
-func flattenWafRegexMatchTuples(tuples []*waf.RegexMatchTuple) []interface{} {
+func FlattenRegexMatchTuples(tuples []*waf.RegexMatchTuple) []interface{} {
 	out := make([]interface{}, len(tuples))
 	for i, t := range tuples {
 		m := make(map[string]interface{})
@@ -293,24 +293,24 @@ func flattenWafRegexMatchTuples(tuples []*waf.RegexMatchTuple) []interface{} {
 		if t.FieldToMatch != nil {
 			m["field_to_match"] = FlattenFieldToMatch(t.FieldToMatch)
 		}
-		m["regex_pattern_set_id"] = *t.RegexPatternSetId
-		m["text_transformation"] = *t.TextTransformation
+		m["regex_pattern_set_id"] = aws.StringValue(t.RegexPatternSetId)
+		m["text_transformation"] = aws.StringValue(t.TextTransformation)
 
 		out[i] = m
 	}
 
 	return out
 }
 
-func
expandWafRegexMatchTuple(tuple map[string]interface{}) *waf.RegexMatchTuple { +func ExpandRegexMatchTuple(tuple map[string]interface{}) *waf.RegexMatchTuple { ftm := tuple["field_to_match"].([]interface{}) return &waf.RegexMatchTuple{ - FieldToMatch: expandFieldToMatch(ftm[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(ftm[0].(map[string]interface{})), RegexPatternSetId: aws.String(tuple["regex_pattern_set_id"].(string)), TextTransformation: aws.String(tuple["text_transformation"].(string)), } } -func diffWafRegexMatchSetTuples(oldT, newT []interface{}) []*waf.RegexMatchSetUpdate { +func DiffRegexMatchSetTuples(oldT, newT []interface{}) []*waf.RegexMatchSetUpdate { updates := make([]*waf.RegexMatchSetUpdate, 0) for _, ot := range oldT { @@ -323,7 +323,7 @@ func diffWafRegexMatchSetTuples(oldT, newT []interface{}) []*waf.RegexMatchSetUp updates = append(updates, &waf.RegexMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), - RegexMatchTuple: expandWafRegexMatchTuple(tuple), + RegexMatchTuple: ExpandRegexMatchTuple(tuple), }) } @@ -332,7 +332,7 @@ func diffWafRegexMatchSetTuples(oldT, newT []interface{}) []*waf.RegexMatchSetUp updates = append(updates, &waf.RegexMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), - RegexMatchTuple: expandWafRegexMatchTuple(tuple), + RegexMatchTuple: ExpandRegexMatchTuple(tuple), }) } return updates diff --git a/internal/service/waf/rate_based_rule.go b/internal/service/waf/rate_based_rule.go index 63d35bd49103..aed3c87b8979 100644 --- a/internal/service/waf/rate_based_rule.go +++ b/internal/service/waf/rate_based_rule.go @@ -244,7 +244,7 @@ func updateWafRateBasedRuleResource(id string, oldP, newP []interface{}, rateLim req := &waf.UpdateRateBasedRuleInput{ ChangeToken: token, RuleId: aws.String(id), - Updates: diffWafRulePredicates(oldP, newP), + Updates: DiffRulePredicates(oldP, newP), RateLimit: aws.Int64(int64(rateLimit.(int))), } diff --git a/internal/service/waf/regex_match_set.go b/internal/service/waf/regex_match_set.go index b3694759f757..0af085a93132 100644 --- a/internal/service/waf/regex_match_set.go +++ b/internal/service/waf/regex_match_set.go @@ -116,7 +116,7 @@ func resourceRegexMatchSetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("name", resp.RegexMatchSet.Name) - d.Set("regex_match_tuple", flattenWafRegexMatchTuples(resp.RegexMatchSet.RegexMatchTuples)) + d.Set("regex_match_tuple", FlattenRegexMatchTuples(resp.RegexMatchSet.RegexMatchTuples)) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, @@ -180,7 +180,7 @@ func updateRegexMatchSetResource(id string, oldT, newT []interface{}, conn *waf. 
req := &waf.UpdateRegexMatchSetInput{ ChangeToken: token, RegexMatchSetId: aws.String(id), - Updates: diffWafRegexMatchSetTuples(oldT, newT), + Updates: DiffRegexMatchSetTuples(oldT, newT), } return conn.UpdateRegexMatchSet(req) diff --git a/internal/service/waf/regex_pattern_set.go b/internal/service/waf/regex_pattern_set.go index 23892eaec646..61d18365581d 100644 --- a/internal/service/waf/regex_pattern_set.go +++ b/internal/service/waf/regex_pattern_set.go @@ -147,7 +147,7 @@ func updateWafRegexPatternSetPatternStrings(id string, oldPatterns, newPatterns req := &waf.UpdateRegexPatternSetInput{ ChangeToken: token, RegexPatternSetId: aws.String(id), - Updates: diffWafRegexPatternSetPatternStrings(oldPatterns, newPatterns), + Updates: DiffRegexPatternSetPatternStrings(oldPatterns, newPatterns), } return conn.UpdateRegexPatternSet(req) diff --git a/internal/service/waf/rule.go b/internal/service/waf/rule.go index 71aa06e49fbd..75fda110b1f7 100644 --- a/internal/service/waf/rule.go +++ b/internal/service/waf/rule.go @@ -275,7 +275,7 @@ func updateWafRuleResource(id string, oldP, newP []interface{}, conn *waf.WAF) e req := &waf.UpdateRuleInput{ ChangeToken: token, RuleId: aws.String(id), - Updates: diffWafRulePredicates(oldP, newP), + Updates: DiffRulePredicates(oldP, newP), } return conn.UpdateRule(req) diff --git a/internal/service/waf/rule_group.go b/internal/service/waf/rule_group.go index 767282977b89..60ac63c349a7 100644 --- a/internal/service/waf/rule_group.go +++ b/internal/service/waf/rule_group.go @@ -171,7 +171,7 @@ func resourceRuleGroupRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting tags_all: %w", err) } - d.Set("activated_rule", flattenWafActivatedRules(rResp.ActivatedRules)) + d.Set("activated_rule", FlattenActivatedRules(rResp.ActivatedRules)) d.Set("name", resp.RuleGroup.Name) d.Set("metric_name", resp.RuleGroup.MetricName) @@ -241,7 +241,7 @@ func updateWafRuleGroupResource(id string, oldRules, newRules []interface{}, con req := &waf.UpdateRuleGroupInput{ ChangeToken: token, RuleGroupId: aws.String(id), - Updates: diffWafRuleGroupActivatedRules(oldRules, newRules), + Updates: DiffRuleGroupActivatedRules(oldRules, newRules), } return conn.UpdateRuleGroup(req) diff --git a/internal/service/waf/size_constraint_set.go b/internal/service/waf/size_constraint_set.go index 56105be4bb15..1aeb4d431564 100644 --- a/internal/service/waf/size_constraint_set.go +++ b/internal/service/waf/size_constraint_set.go @@ -22,7 +22,7 @@ func ResourceSizeConstraintSet() *schema.Resource { State: schema.ImportStatePassthrough, }, - Schema: wafSizeConstraintSetSchema(), + Schema: SizeConstraintSetSchema(), } } @@ -69,7 +69,7 @@ func resourceSizeConstraintSetRead(d *schema.ResourceData, meta interface{}) err } d.Set("name", resp.SizeConstraintSet.Name) - d.Set("size_constraints", flattenWafSizeConstraints(resp.SizeConstraintSet.SizeConstraints)) + d.Set("size_constraints", FlattenSizeConstraints(resp.SizeConstraintSet.SizeConstraints)) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, @@ -132,7 +132,7 @@ func updateSizeConstraintSetResource(id string, oldS, newS []interface{}, conn * req := &waf.UpdateSizeConstraintSetInput{ ChangeToken: token, SizeConstraintSetId: aws.String(id), - Updates: diffWafSizeConstraints(oldS, newS), + Updates: DiffSizeConstraints(oldS, newS), } log.Printf("[INFO] Updating WAF Size Constraint constraints: %s", req) diff --git a/internal/service/waf/sql_injection_match_set.go b/internal/service/waf/sql_injection_match_set.go 
index e9c0f43a8c02..d7d4dac2a381 100644 --- a/internal/service/waf/sql_injection_match_set.go +++ b/internal/service/waf/sql_injection_match_set.go @@ -200,7 +200,7 @@ func diffWafSqlInjectionMatchTuples(oldT, newT []interface{}) []*waf.SqlInjectio updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) @@ -212,7 +212,7 @@ func diffWafSqlInjectionMatchTuples(oldT, newT []interface{}) []*waf.SqlInjectio updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) diff --git a/internal/service/waf/web_acl.go b/internal/service/waf/web_acl.go index f7e301053651..2889f416cf73 100644 --- a/internal/service/waf/web_acl.go +++ b/internal/service/waf/web_acl.go @@ -158,7 +158,7 @@ func resourceWebACLCreate(d *schema.ResourceData, meta interface{}) error { out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { params := &waf.CreateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: ExpandAction(d.Get("default_action").([]interface{})), MetricName: aws.String(d.Get("metric_name").(string)), Name: aws.String(d.Get("name").(string)), } @@ -201,7 +201,7 @@ func resourceWebACLCreate(d *schema.ResourceData, meta interface{}) error { _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { req := &waf.UpdateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: ExpandAction(d.Get("default_action").([]interface{})), Updates: diffWebACLRules([]interface{}{}, rules), WebACLId: aws.String(d.Id()), } @@ -249,7 +249,7 @@ func resourceWebACLRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", resp.WebACL.WebACLArn) arn := aws.StringValue(resp.WebACL.WebACLArn) - if err := d.Set("default_action", flattenAction(resp.WebACL.DefaultAction)); err != nil { + if err := d.Set("default_action", FlattenAction(resp.WebACL.DefaultAction)); err != nil { return fmt.Errorf("error setting default_action: %w", err) } d.Set("name", resp.WebACL.Name) @@ -271,7 +271,7 @@ func resourceWebACLRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting tags_all: %w", err) } - if err := d.Set("rules", flattenWebACLRules(resp.WebACL.Rules)); err != nil { + if err := d.Set("rules", FlattenWebACLRules(resp.WebACL.Rules)); err != nil { return fmt.Errorf("error setting rules: %w", err) } @@ -308,7 +308,7 @@ func resourceWebACLUpdate(d *schema.ResourceData, meta interface{}) error { _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { req := &waf.UpdateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: ExpandAction(d.Get("default_action").([]interface{})), Updates: diffWebACLRules(oldR, newR), 
WebACLId: aws.String(d.Id()), } @@ -363,7 +363,7 @@ func resourceWebACLDelete(d *schema.ResourceData, meta interface{}) error { _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { req := &waf.UpdateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: ExpandAction(d.Get("default_action").([]interface{})), Updates: diffWebACLRules(rules, []interface{}{}), WebACLId: aws.String(d.Id()), } @@ -430,7 +430,7 @@ func expandWAFRedactedFields(l []interface{}) []*waf.FieldToMatch { continue } - redactedFields = append(redactedFields, expandFieldToMatch(fieldToMatch.(map[string]interface{}))) + redactedFields = append(redactedFields, ExpandFieldToMatch(fieldToMatch.(map[string]interface{}))) } return redactedFields @@ -493,12 +493,12 @@ func diffWebACLRules(oldR, newR []interface{}) []*waf.WebACLUpdate { newR = append(newR[:idx], newR[idx+1:]...) continue } - updates = append(updates, expandWebACLUpdate(waf.ChangeActionDelete, aclRule)) + updates = append(updates, ExpandWebACLUpdate(waf.ChangeActionDelete, aclRule)) } for _, nr := range newR { aclRule := nr.(map[string]interface{}) - updates = append(updates, expandWebACLUpdate(waf.ChangeActionInsert, aclRule)) + updates = append(updates, ExpandWebACLUpdate(waf.ChangeActionInsert, aclRule)) } return updates } diff --git a/internal/service/waf/xss_match_set.go b/internal/service/waf/xss_match_set.go index a167e4d608b4..ca64347e6aba 100644 --- a/internal/service/waf/xss_match_set.go +++ b/internal/service/waf/xss_match_set.go @@ -219,7 +219,7 @@ func diffXSSMatchSetTuples(oldT, newT []interface{}) []*waf.XssMatchSetUpdate { updates = append(updates, &waf.XssMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) @@ -231,7 +231,7 @@ func diffXSSMatchSetTuples(oldT, newT []interface{}) []*waf.XssMatchSetUpdate { updates = append(updates, &waf.XssMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) diff --git a/internal/service/wafregional/byte_match_set.go b/internal/service/wafregional/byte_match_set.go index 06a2c45334ce..a45774a00e36 100644 --- a/internal/service/wafregional/byte_match_set.go +++ b/internal/service/wafregional/byte_match_set.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceByteMatchSet() *schema.Resource { @@ -232,7 +233,7 @@ func diffByteMatchSetTuple(oldT, newT []interface{}) []*waf.ByteMatchSetUpdate { updates = append(updates, &waf.ByteMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: 
tfwaf.ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), TargetString: []byte(tuple["target_string"].(string)), TextTransformation: aws.String(tuple["text_transformation"].(string)), @@ -246,7 +247,7 @@ func diffByteMatchSetTuple(oldT, newT []interface{}) []*waf.ByteMatchSetUpdate { updates = append(updates, &waf.ByteMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: tfwaf.ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), TargetString: []byte(tuple["target_string"].(string)), TextTransformation: aws.String(tuple["text_transformation"].(string)), diff --git a/internal/service/wafregional/flex.go b/internal/service/wafregional/flex.go deleted file mode 100644 index 55cbe0050263..000000000000 --- a/internal/service/wafregional/flex.go +++ /dev/null @@ -1,117 +0,0 @@ -package wafregional - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/waf" -) - -func expandAction(l []interface{}) *waf.WafAction { - if len(l) == 0 || l[0] == nil { - return nil - } - - m := l[0].(map[string]interface{}) - - return &waf.WafAction{ - Type: aws.String(m["type"].(string)), - } -} - -func expandOverrideAction(l []interface{}) *waf.WafOverrideAction { - if len(l) == 0 || l[0] == nil { - return nil - } - - m := l[0].(map[string]interface{}) - - return &waf.WafOverrideAction{ - Type: aws.String(m["type"].(string)), - } -} - -func expandWebACLUpdate(updateAction string, aclRule map[string]interface{}) *waf.WebACLUpdate { - var rule *waf.ActivatedRule - - switch aclRule["type"].(string) { - case waf.WafRuleTypeGroup: - rule = &waf.ActivatedRule{ - OverrideAction: expandOverrideAction(aclRule["override_action"].([]interface{})), - Priority: aws.Int64(int64(aclRule["priority"].(int))), - RuleId: aws.String(aclRule["rule_id"].(string)), - Type: aws.String(aclRule["type"].(string)), - } - default: - rule = &waf.ActivatedRule{ - Action: expandAction(aclRule["action"].([]interface{})), - Priority: aws.Int64(int64(aclRule["priority"].(int))), - RuleId: aws.String(aclRule["rule_id"].(string)), - Type: aws.String(aclRule["type"].(string)), - } - } - - update := &waf.WebACLUpdate{ - Action: aws.String(updateAction), - ActivatedRule: rule, - } - - return update -} - -func flattenAction(n *waf.WafAction) []map[string]interface{} { - if n == nil { - return nil - } - - result := map[string]interface{}{ - "type": aws.StringValue(n.Type), - } - - return []map[string]interface{}{result} -} - -func flattenWebACLRules(ts []*waf.ActivatedRule) []map[string]interface{} { - out := make([]map[string]interface{}, len(ts)) - for i, r := range ts { - m := make(map[string]interface{}) - - switch aws.StringValue(r.Type) { - case waf.WafRuleTypeGroup: - actionMap := map[string]interface{}{ - "type": aws.StringValue(r.OverrideAction.Type), - } - m["override_action"] = []map[string]interface{}{actionMap} - default: - actionMap := map[string]interface{}{ - "type": aws.StringValue(r.Action.Type), - } - m["action"] = []map[string]interface{}{actionMap} - } - - m["priority"] = int(aws.Int64Value(r.Priority)) - m["rule_id"] = aws.StringValue(r.RuleId) - m["type"] = aws.StringValue(r.Type) - out[i] = m - } - return out -} - -func 
expandFieldToMatch(d map[string]interface{}) *waf.FieldToMatch { - ftm := &waf.FieldToMatch{ - Type: aws.String(d["type"].(string)), - } - if data, ok := d["data"].(string); ok && data != "" { - ftm.Data = aws.String(data) - } - return ftm -} - -func FlattenFieldToMatch(fm *waf.FieldToMatch) []interface{} { - m := make(map[string]interface{}) - if fm.Data != nil { - m["data"] = *fm.Data - } - if fm.Type != nil { - m["type"] = *fm.Type - } - return []interface{}{m} -} diff --git a/internal/service/wafregional/geo_match_set.go b/internal/service/wafregional/geo_match_set.go index 673ab9db8658..11e510d89234 100644 --- a/internal/service/wafregional/geo_match_set.go +++ b/internal/service/wafregional/geo_match_set.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceGeoMatchSet() *schema.Resource { @@ -92,7 +93,7 @@ func resourceGeoMatchSetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("name", resp.GeoMatchSet.Name) - d.Set("geo_match_constraint", flattenWafGeoMatchConstraint(resp.GeoMatchSet.GeoMatchConstraints)) + d.Set("geo_match_constraint", tfwaf.FlattenGeoMatchConstraint(resp.GeoMatchSet.GeoMatchConstraints)) return nil } @@ -157,7 +158,7 @@ func updateGeoMatchSetResourceWR(id string, oldConstraints, newConstraints []int req := &waf.UpdateGeoMatchSetInput{ ChangeToken: token, GeoMatchSetId: aws.String(id), - Updates: diffWafGeoMatchSetConstraints(oldConstraints, newConstraints), + Updates: tfwaf.DiffGeoMatchSetConstraints(oldConstraints, newConstraints), } log.Printf("[INFO] Updating WAF Regional Geo Match Set constraints: %s", req) diff --git a/internal/service/wafregional/rate_based_rule.go b/internal/service/wafregional/rate_based_rule.go index 7bb2c6eaa081..8f0739130f2f 100644 --- a/internal/service/wafregional/rate_based_rule.go +++ b/internal/service/wafregional/rate_based_rule.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -258,7 +259,7 @@ func updateWafRateBasedRuleResourceWR(id string, oldP, newP []interface{}, rateL req := &waf.UpdateRateBasedRuleInput{ ChangeToken: token, RuleId: aws.String(id), - Updates: diffWafRulePredicates(oldP, newP), + Updates: tfwaf.DiffRulePredicates(oldP, newP), RateLimit: aws.Int64(int64(rateLimit.(int))), } diff --git a/internal/service/wafregional/regex_match_set.go b/internal/service/wafregional/regex_match_set.go index b5b1f21c9e14..9ecdf03dac97 100644 --- a/internal/service/wafregional/regex_match_set.go +++ b/internal/service/wafregional/regex_match_set.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceRegexMatchSet() *schema.Resource { @@ -32,7 +33,7 @@ func ResourceRegexMatchSet() *schema.Resource { "regex_match_tuple": { Type: schema.TypeSet, Optional: true, - Set: 
WAFRegexMatchSetTupleHash, + Set: tfwaf.RegexMatchSetTupleHash, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "field_to_match": { @@ -109,7 +110,7 @@ func resourceRegexMatchSetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("name", set.Name) - d.Set("regex_match_tuple", flattenWafRegexMatchTuples(set.RegexMatchTuples)) + d.Set("regex_match_tuple", tfwaf.FlattenRegexMatchTuples(set.RegexMatchTuples)) return nil } @@ -158,7 +159,7 @@ func getRegexMatchTuplesFromResourceData(d *schema.ResourceData) []*waf.RegexMat result := []*waf.RegexMatchTuple{} for _, t := range d.Get("regex_match_tuple").(*schema.Set).List() { - result = append(result, expandWafRegexMatchTuple(t.(map[string]interface{}))) + result = append(result, tfwaf.ExpandRegexMatchTuple(t.(map[string]interface{}))) } return result @@ -224,7 +225,7 @@ func updateRegexMatchSetResourceWR(id string, oldT, newT []interface{}, conn *wa req := &waf.UpdateRegexMatchSetInput{ ChangeToken: token, RegexMatchSetId: aws.String(id), - Updates: diffWafRegexMatchSetTuples(oldT, newT), + Updates: tfwaf.DiffRegexMatchSetTuples(oldT, newT), } return conn.UpdateRegexMatchSet(req) diff --git a/internal/service/wafregional/regex_match_set_test.go b/internal/service/wafregional/regex_match_set_test.go index d5283ae2dc6d..0e0df7b71f16 100644 --- a/internal/service/wafregional/regex_match_set_test.go +++ b/internal/service/wafregional/regex_match_set_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" tfwafregional "github.com/hashicorp/terraform-provider-aws/internal/service/wafregional" ) @@ -301,12 +302,12 @@ resource "aws_wafregional_regex_match_set" "test" { func computeWafRegexMatchSetTuple(patternSet *waf.RegexPatternSet, fieldToMatch *waf.FieldToMatch, textTransformation string, idx *int) resource.TestCheckFunc { return func(s *terraform.State) error { m := map[string]interface{}{ - "field_to_match": tfwafregional.FlattenFieldToMatch(fieldToMatch), + "field_to_match": tfwaf.FlattenFieldToMatch(fieldToMatch), "regex_pattern_set_id": *patternSet.RegexPatternSetId, "text_transformation": textTransformation, } - *idx = tfwafregional.WAFRegexMatchSetTupleHash(m) + *idx = tfwaf.RegexMatchSetTupleHash(m) return nil } diff --git a/internal/service/wafregional/regex_pattern_set.go b/internal/service/wafregional/regex_pattern_set.go index 9d0d1368086f..a7ebdf9508a7 100644 --- a/internal/service/wafregional/regex_pattern_set.go +++ b/internal/service/wafregional/regex_pattern_set.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceRegexPatternSet() *schema.Resource { @@ -147,7 +148,7 @@ func updateWafRegionalRegexPatternSetPatternStringsWR(id string, oldPatterns, ne req := &waf.UpdateRegexPatternSetInput{ ChangeToken: token, RegexPatternSetId: aws.String(id), - Updates: diffWafRegexPatternSetPatternStrings(oldPatterns, newPatterns), + Updates: tfwaf.DiffRegexPatternSetPatternStrings(oldPatterns, newPatterns), } return conn.UpdateRegexPatternSet(req) diff --git a/internal/service/wafregional/rule.go b/internal/service/wafregional/rule.go index 
9b56c5583217..24cd78ee26de 100644 --- a/internal/service/wafregional/rule.go +++ b/internal/service/wafregional/rule.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -223,7 +224,7 @@ func updateWafRegionalRuleResource(id string, oldP, newP []interface{}, meta int req := &waf.UpdateRuleInput{ ChangeToken: token, RuleId: aws.String(id), - Updates: diffWafRulePredicates(oldP, newP), + Updates: tfwaf.DiffRulePredicates(oldP, newP), } return conn.UpdateRule(req) diff --git a/internal/service/wafregional/rule_group.go b/internal/service/wafregional/rule_group.go index 5c3bc82b19f0..e597e9d051e7 100644 --- a/internal/service/wafregional/rule_group.go +++ b/internal/service/wafregional/rule_group.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -173,7 +174,7 @@ func resourceRuleGroupRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting tags_all: %w", err) } - d.Set("activated_rule", FlattenWAFActivatedRules(rResp.ActivatedRules)) + d.Set("activated_rule", tfwaf.FlattenActivatedRules(rResp.ActivatedRules)) d.Set("name", resp.RuleGroup.Name) d.Set("metric_name", resp.RuleGroup.MetricName) @@ -245,7 +246,7 @@ func updateWafRuleGroupResourceWR(id string, oldRules, newRules []interface{}, c req := &waf.UpdateRuleGroupInput{ ChangeToken: token, RuleGroupId: aws.String(id), - Updates: diffWafRuleGroupActivatedRules(oldRules, newRules), + Updates: tfwaf.DiffRuleGroupActivatedRules(oldRules, newRules), } return conn.UpdateRuleGroup(req) diff --git a/internal/service/wafregional/size_constraint_set.go b/internal/service/wafregional/size_constraint_set.go index 3e82ef696632..b74ca02124d2 100644 --- a/internal/service/wafregional/size_constraint_set.go +++ b/internal/service/wafregional/size_constraint_set.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceSizeConstraintSet() *schema.Resource { @@ -22,7 +23,7 @@ func ResourceSizeConstraintSet() *schema.Resource { State: schema.ImportStatePassthrough, }, - Schema: wafSizeConstraintSetSchema(), + Schema: tfwaf.SizeConstraintSetSchema(), } } @@ -72,7 +73,7 @@ func resourceSizeConstraintSetRead(d *schema.ResourceData, meta interface{}) err } d.Set("name", resp.SizeConstraintSet.Name) - d.Set("size_constraints", flattenWafSizeConstraints(resp.SizeConstraintSet.SizeConstraints)) + d.Set("size_constraints", tfwaf.FlattenSizeConstraints(resp.SizeConstraintSet.SizeConstraints)) return nil } @@ -139,7 +140,7 @@ func updateRegionalSizeConstraintSetResource(id string, oldConstraints, newConst req := &waf.UpdateSizeConstraintSetInput{ ChangeToken: token, SizeConstraintSetId: 
aws.String(id), - Updates: diffWafSizeConstraints(oldConstraints, newConstraints), + Updates: tfwaf.DiffSizeConstraints(oldConstraints, newConstraints), } log.Printf("[INFO] Updating WAF Regional SizeConstraintSet: %s", req) diff --git a/internal/service/wafregional/sql_injection_match_set.go b/internal/service/wafregional/sql_injection_match_set.go index 36d80c4f6967..29be05cde76c 100644 --- a/internal/service/wafregional/sql_injection_match_set.go +++ b/internal/service/wafregional/sql_injection_match_set.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceSQLInjectionMatchSet() *schema.Resource { @@ -206,7 +207,7 @@ func diffWafSqlInjectionMatchTuplesWR(oldT, newT []interface{}) []*waf.SqlInject updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(ftm[0].(map[string]interface{})), + FieldToMatch: tfwaf.ExpandFieldToMatch(ftm[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) @@ -219,7 +220,7 @@ func diffWafSqlInjectionMatchTuplesWR(oldT, newT []interface{}) []*waf.SqlInject updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(ftm[0].(map[string]interface{})), + FieldToMatch: tfwaf.ExpandFieldToMatch(ftm[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) @@ -249,7 +250,7 @@ func flattenSQLInjectionMatchTuples(ts []*waf.SqlInjectionMatchTuple) []interfac for i, t := range ts { m := make(map[string]interface{}) m["text_transformation"] = aws.StringValue(t.TextTransformation) - m["field_to_match"] = FlattenFieldToMatch(t.FieldToMatch) + m["field_to_match"] = tfwaf.FlattenFieldToMatch(t.FieldToMatch) out[i] = m } diff --git a/internal/service/wafregional/waf_helpers.go b/internal/service/wafregional/waf_helpers.go deleted file mode 100644 index 81fa9a7a57d8..000000000000 --- a/internal/service/wafregional/waf_helpers.go +++ /dev/null @@ -1,357 +0,0 @@ -package wafregional - -import ( - "bytes" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/verify" -) - -func wafSizeConstraintSetSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "size_constraints": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_to_match": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "comparison_operator": { - Type: schema.TypeString, - Required: true, - }, - "size": { - Type: schema.TypeInt, - Required: true, - }, - 
"text_transformation": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - } -} - -func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUpdate { - updates := make([]*waf.SizeConstraintSetUpdate, 0) - - for _, os := range oldS { - constraint := os.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newS, constraint); contains { - newS = append(newS[:idx], newS[idx+1:]...) - continue - } - - updates = append(updates, &waf.SizeConstraintSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(constraint["field_to_match"].([]interface{})[0].(map[string]interface{})), - ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), - Size: aws.Int64(int64(constraint["size"].(int))), - TextTransformation: aws.String(constraint["text_transformation"].(string)), - }, - }) - } - - for _, ns := range newS { - constraint := ns.(map[string]interface{}) - - updates = append(updates, &waf.SizeConstraintSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(constraint["field_to_match"].([]interface{})[0].(map[string]interface{})), - ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), - Size: aws.Int64(int64(constraint["size"].(int))), - TextTransformation: aws.String(constraint["text_transformation"].(string)), - }, - }) - } - return updates -} - -func flattenWafSizeConstraints(sc []*waf.SizeConstraint) []interface{} { - out := make([]interface{}, len(sc)) - for i, c := range sc { - m := make(map[string]interface{}) - m["comparison_operator"] = *c.ComparisonOperator - if c.FieldToMatch != nil { - m["field_to_match"] = FlattenFieldToMatch(c.FieldToMatch) - } - m["size"] = *c.Size - m["text_transformation"] = *c.TextTransformation - out[i] = m - } - return out -} - -func flattenWafGeoMatchConstraint(ts []*waf.GeoMatchConstraint) []interface{} { - out := make([]interface{}, len(ts)) - for i, t := range ts { - m := make(map[string]interface{}) - m["type"] = *t.Type - m["value"] = *t.Value - out[i] = m - } - return out -} - -func diffWafGeoMatchSetConstraints(oldT, newT []interface{}) []*waf.GeoMatchSetUpdate { - updates := make([]*waf.GeoMatchSetUpdate, 0) - - for _, od := range oldT { - constraint := od.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newT, constraint); contains { - newT = append(newT[:idx], newT[idx+1:]...) - continue - } - - updates = append(updates, &waf.GeoMatchSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - GeoMatchConstraint: &waf.GeoMatchConstraint{ - Type: aws.String(constraint["type"].(string)), - Value: aws.String(constraint["value"].(string)), - }, - }) - } - - for _, nd := range newT { - constraint := nd.(map[string]interface{}) - - updates = append(updates, &waf.GeoMatchSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - GeoMatchConstraint: &waf.GeoMatchConstraint{ - Type: aws.String(constraint["type"].(string)), - Value: aws.String(constraint["value"].(string)), - }, - }) - } - return updates -} - -func diffWafRegexPatternSetPatternStrings(oldPatterns, newPatterns []interface{}) []*waf.RegexPatternSetUpdate { - updates := make([]*waf.RegexPatternSetUpdate, 0) - - for _, op := range oldPatterns { - if idx, contains := verify.SliceContainsString(newPatterns, op.(string)); contains { - newPatterns = append(newPatterns[:idx], newPatterns[idx+1:]...) 
- continue - } - - updates = append(updates, &waf.RegexPatternSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - RegexPatternString: aws.String(op.(string)), - }) - } - - for _, np := range newPatterns { - updates = append(updates, &waf.RegexPatternSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - RegexPatternString: aws.String(np.(string)), - }) - } - return updates -} - -func diffWafRulePredicates(oldP, newP []interface{}) []*waf.RuleUpdate { - updates := make([]*waf.RuleUpdate, 0) - - for _, op := range oldP { - predicate := op.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newP, predicate); contains { - newP = append(newP[:idx], newP[idx+1:]...) - continue - } - - updates = append(updates, &waf.RuleUpdate{ - Action: aws.String(waf.ChangeActionDelete), - Predicate: &waf.Predicate{ - Negated: aws.Bool(predicate["negated"].(bool)), - Type: aws.String(predicate["type"].(string)), - DataId: aws.String(predicate["data_id"].(string)), - }, - }) - } - - for _, np := range newP { - predicate := np.(map[string]interface{}) - - updates = append(updates, &waf.RuleUpdate{ - Action: aws.String(waf.ChangeActionInsert), - Predicate: &waf.Predicate{ - Negated: aws.Bool(predicate["negated"].(bool)), - Type: aws.String(predicate["type"].(string)), - DataId: aws.String(predicate["data_id"].(string)), - }, - }) - } - return updates -} - -func diffWafRuleGroupActivatedRules(oldRules, newRules []interface{}) []*waf.RuleGroupUpdate { - updates := make([]*waf.RuleGroupUpdate, 0) - - for _, op := range oldRules { - rule := op.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newRules, rule); contains { - newRules = append(newRules[:idx], newRules[idx+1:]...) - continue - } - - updates = append(updates, &waf.RuleGroupUpdate{ - Action: aws.String(waf.ChangeActionDelete), - ActivatedRule: expandWafActivatedRule(rule), - }) - } - - for _, np := range newRules { - rule := np.(map[string]interface{}) - - updates = append(updates, &waf.RuleGroupUpdate{ - Action: aws.String(waf.ChangeActionInsert), - ActivatedRule: expandWafActivatedRule(rule), - }) - } - return updates -} - -func FlattenWAFActivatedRules(activatedRules []*waf.ActivatedRule) []interface{} { - out := make([]interface{}, len(activatedRules)) - for i, ar := range activatedRules { - rule := map[string]interface{}{ - "priority": int(*ar.Priority), - "rule_id": *ar.RuleId, - "type": *ar.Type, - } - if ar.Action != nil { - rule["action"] = []interface{}{ - map[string]interface{}{ - "type": *ar.Action.Type, - }, - } - } - out[i] = rule - } - return out -} - -func expandWafActivatedRule(rule map[string]interface{}) *waf.ActivatedRule { - r := &waf.ActivatedRule{ - Priority: aws.Int64(int64(rule["priority"].(int))), - RuleId: aws.String(rule["rule_id"].(string)), - Type: aws.String(rule["type"].(string)), - } - - if a, ok := rule["action"].([]interface{}); ok && len(a) > 0 { - m := a[0].(map[string]interface{}) - r.Action = &waf.WafAction{ - Type: aws.String(m["type"].(string)), - } - } - return r -} - -func flattenWafRegexMatchTuples(tuples []*waf.RegexMatchTuple) []interface{} { - out := make([]interface{}, len(tuples)) - for i, t := range tuples { - m := make(map[string]interface{}) - - if t.FieldToMatch != nil { - m["field_to_match"] = FlattenFieldToMatch(t.FieldToMatch) - } - m["regex_pattern_set_id"] = *t.RegexPatternSetId - m["text_transformation"] = *t.TextTransformation - - out[i] = m - } - return out -} - -func expandWafRegexMatchTuple(tuple map[string]interface{}) *waf.RegexMatchTuple { - ftm := 
tuple["field_to_match"].([]interface{}) - return &waf.RegexMatchTuple{ - FieldToMatch: expandFieldToMatch(ftm[0].(map[string]interface{})), - RegexPatternSetId: aws.String(tuple["regex_pattern_set_id"].(string)), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - } -} - -func diffWafRegexMatchSetTuples(oldT, newT []interface{}) []*waf.RegexMatchSetUpdate { - updates := make([]*waf.RegexMatchSetUpdate, 0) - - for _, ot := range oldT { - tuple := ot.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newT, tuple); contains { - newT = append(newT[:idx], newT[idx+1:]...) - continue - } - - updates = append(updates, &waf.RegexMatchSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - RegexMatchTuple: expandWafRegexMatchTuple(tuple), - }) - } - - for _, nt := range newT { - tuple := nt.(map[string]interface{}) - - updates = append(updates, &waf.RegexMatchSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - RegexMatchTuple: expandWafRegexMatchTuple(tuple), - }) - } - return updates -} - -func WAFRegexMatchSetTupleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if v, ok := m["field_to_match"]; ok { - ftms := v.([]interface{}) - ftm := ftms[0].(map[string]interface{}) - - if v, ok := ftm["data"]; ok { - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(v.(string)))) - } - buf.WriteString(fmt.Sprintf("%s-", ftm["type"].(string))) - } - buf.WriteString(fmt.Sprintf("%s-", m["regex_pattern_set_id"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["text_transformation"].(string))) - - return create.StringHashcode(buf.String()) -} diff --git a/internal/service/wafregional/web_acl.go b/internal/service/wafregional/web_acl.go index 0b9a07628972..54a13e92d3d0 100644 --- a/internal/service/wafregional/web_acl.go +++ b/internal/service/wafregional/web_acl.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -186,7 +187,7 @@ func resourceWebACLCreate(d *schema.ResourceData, meta interface{}) error { out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { params := &waf.CreateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: tfwaf.ExpandAction(d.Get("default_action").([]interface{})), MetricName: aws.String(d.Get("metric_name").(string)), Name: aws.String(d.Get("name").(string)), } @@ -234,7 +235,7 @@ func resourceWebACLCreate(d *schema.ResourceData, meta interface{}) error { _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { req := &waf.UpdateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: tfwaf.ExpandAction(d.Get("default_action").([]interface{})), Updates: diffWebACLRules([]interface{}{}, rules), WebACLId: aws.String(d.Id()), } @@ -287,12 +288,12 @@ func resourceWebACLRead(d *schema.ResourceData, meta interface{}) error { } d.Set("arn", webACLARN) - if err := d.Set("default_action", flattenAction(resp.WebACL.DefaultAction)); err != nil { + if err := d.Set("default_action", tfwaf.FlattenAction(resp.WebACL.DefaultAction)); err != nil { return fmt.Errorf("error setting 
default_action: %s", err) } d.Set("name", resp.WebACL.Name) d.Set("metric_name", resp.WebACL.MetricName) - if err := d.Set("rule", flattenWebACLRules(resp.WebACL.Rules)); err != nil { + if err := d.Set("rule", tfwaf.FlattenWebACLRules(resp.WebACL.Rules)); err != nil { return fmt.Errorf("error setting rule: %s", err) } @@ -347,7 +348,7 @@ func resourceWebACLUpdate(d *schema.ResourceData, meta interface{}) error { _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { req := &waf.UpdateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: tfwaf.ExpandAction(d.Get("default_action").([]interface{})), Updates: diffWebACLRules(oldR, newR), WebACLId: aws.String(d.Id()), } @@ -404,7 +405,7 @@ func resourceWebACLDelete(d *schema.ResourceData, meta interface{}) error { _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { req := &waf.UpdateWebACLInput{ ChangeToken: token, - DefaultAction: expandAction(d.Get("default_action").([]interface{})), + DefaultAction: tfwaf.ExpandAction(d.Get("default_action").([]interface{})), Updates: diffWebACLRules(rules, []interface{}{}), WebACLId: aws.String(d.Id()), } @@ -467,7 +468,7 @@ func expandWAFRegionalRedactedFields(l []interface{}) []*waf.FieldToMatch { continue } - redactedFields = append(redactedFields, expandFieldToMatch(fieldToMatch.(map[string]interface{}))) + redactedFields = append(redactedFields, tfwaf.ExpandFieldToMatch(fieldToMatch.(map[string]interface{}))) } return redactedFields @@ -510,7 +511,7 @@ func flattenWAFRegionalRedactedFields(fieldToMatches []*waf.FieldToMatch) []inte l := make([]interface{}, len(fieldToMatches)) for i, fieldToMatch := range fieldToMatches { - l[i] = FlattenFieldToMatch(fieldToMatch)[0] + l[i] = tfwaf.FlattenFieldToMatch(fieldToMatch)[0] } m := map[string]interface{}{ @@ -530,12 +531,12 @@ func diffWebACLRules(oldR, newR []interface{}) []*waf.WebACLUpdate { newR = append(newR[:idx], newR[idx+1:]...) 
continue } - updates = append(updates, expandWebACLUpdate(waf.ChangeActionDelete, aclRule)) + updates = append(updates, tfwaf.ExpandWebACLUpdate(waf.ChangeActionDelete, aclRule)) } for _, nr := range newR { aclRule := nr.(map[string]interface{}) - updates = append(updates, expandWebACLUpdate(waf.ChangeActionInsert, aclRule)) + updates = append(updates, tfwaf.ExpandWebACLUpdate(waf.ChangeActionInsert, aclRule)) } return updates } diff --git a/internal/service/wafregional/xss_match_set.go b/internal/service/wafregional/xss_match_set.go index 6d237771952a..8ef835e9cb1f 100644 --- a/internal/service/wafregional/xss_match_set.go +++ b/internal/service/wafregional/xss_match_set.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" ) func ResourceXSSMatchSet() *schema.Resource { @@ -195,7 +196,7 @@ func flattenXSSMatchTuples(ts []*waf.XssMatchTuple) []interface{} { out := make([]interface{}, len(ts)) for i, t := range ts { m := make(map[string]interface{}) - m["field_to_match"] = FlattenFieldToMatch(t.FieldToMatch) + m["field_to_match"] = tfwaf.FlattenFieldToMatch(t.FieldToMatch) m["text_transformation"] = aws.StringValue(t.TextTransformation) out[i] = m } @@ -216,7 +217,7 @@ func diffXSSMatchSetTuples(oldT, newT []interface{}) []*waf.XssMatchSetUpdate { updates = append(updates, &waf.XssMatchSetUpdate{ Action: aws.String(waf.ChangeActionDelete), XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: tfwaf.ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) @@ -228,7 +229,7 @@ func diffXSSMatchSetTuples(oldT, newT []interface{}) []*waf.XssMatchSetUpdate { updates = append(updates, &waf.XssMatchSetUpdate{ Action: aws.String(waf.ChangeActionInsert), XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), + FieldToMatch: tfwaf.ExpandFieldToMatch(tuple["field_to_match"].([]interface{})[0].(map[string]interface{})), TextTransformation: aws.String(tuple["text_transformation"].(string)), }, }) From 18194ff26667e1be544d0fcc33c5242c83bb1941 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 16 Nov 2021 15:37:38 -0500 Subject: [PATCH 231/304] Passes 'go install' and 'go test'. 
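Moves the IoT thing group resources into internal/service/iot: registers aws_iot_thing_group and aws_iot_thing_group_membership with the provider, ports them to v2 of the plugin SDK and the shared conns client, renames the membership resource from aws_iot_thing_group_attachment, and adopts the provider's standard tags/tags_all handling.

A minimal usage sketch for the newly registered resources, adapted from the acceptance-test fixtures in this patch (the resource labels and names are illustrative only, not part of the change):

  resource "aws_iot_thing" "example" {
    name = "example_thing"
  }

  resource "aws_iot_thing_group" "example" {
    name = "example_group"
  }

  resource "aws_iot_thing_group_membership" "example" {
    thing_name              = "${aws_iot_thing.example.name}"
    thing_group_name        = "${aws_iot_thing_group.example.name}"
    override_dynamics_group = false
  }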
--- internal/provider/provider.go | 2 + internal/service/iot/thing_group.go | 101 ++++++----- .../service/iot/thing_group_membership.go | 42 ++--- .../iot/thing_group_membership_test.go | 61 ++++--- internal/service/iot/thing_group_test.go | 170 ++++++++++-------- 5 files changed, 208 insertions(+), 168 deletions(-) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 36cd51c14a91..d1986a8ed562 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1266,6 +1266,8 @@ func Provider() *schema.Provider { "aws_iot_policy_attachment": iot.ResourcePolicyAttachment(), "aws_iot_role_alias": iot.ResourceRoleAlias(), "aws_iot_thing": iot.ResourceThing(), + "aws_iot_thing_group": iot.ResourceThingGroup(), + "aws_iot_thing_group_membership": iot.ResourceThingGroupMembership(), "aws_iot_thing_principal_attachment": iot.ResourceThingPrincipalAttachment(), "aws_iot_thing_type": iot.ResourceThingType(), "aws_iot_topic_rule": iot.ResourceTopicRule(), diff --git a/internal/service/iot/thing_group.go b/internal/service/iot/thing_group.go index 96e38ac47894..d043542736ef 100644 --- a/internal/service/iot/thing_group.go +++ b/internal/service/iot/thing_group.go @@ -1,4 +1,4 @@ -package aws +package iot import ( "fmt" @@ -6,23 +6,31 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -func resourceAwsIotThingGroup() *schema.Resource { +func ResourceThingGroup() *schema.Resource { return &schema.Resource{ - Create: resourceAwsIotThingGroupCreate, - Read: resourceAwsIotThingGroupRead, - Update: resourceAwsIotThingGroupUpdate, - Delete: resourceAwsIotThingGroupDelete, + Create: resourceThingGroupCreate, + Read: resourceThingGroupRead, + Update: resourceThingGroupUpdate, + Delete: resourceThingGroupDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "name": { Type: schema.TypeString, Required: true, @@ -52,7 +60,6 @@ func resourceAwsIotThingGroup() *schema.Resource { }, }, }, - "tags": tagsSchema(), "metadata": { Type: schema.TypeList, Computed: true, @@ -89,19 +96,21 @@ func resourceAwsIotThingGroup() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), }, + + CustomizeDiff: verify.SetTagsDiff, } } -func resourceAwsIotThingGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn +func resourceThingGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).IoTConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) + input := &iot.CreateThingGroupInput{ ThingGroupName: aws.String(d.Get("name").(string)), - Tags: 
keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().IotTags(), } if v, ok := d.GetOk("parent_group_name"); ok { @@ -111,6 +120,10 @@ func resourceAwsIotThingGroupCreate(d *schema.ResourceData, meta interface{}) er input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) } + if len(tags) > 0 { + input.Tags = Tags(tags.IgnoreAWS()) + } + log.Printf("[DEBUG] Creating IoT Thing Group: %s", input) out, err := conn.CreateThingGroup(input) if err != nil { @@ -118,12 +131,13 @@ func resourceAwsIotThingGroupCreate(d *schema.ResourceData, meta interface{}) er } d.SetId(aws.StringValue(out.ThingGroupName)) - return resourceAwsIotThingGroupRead(d, meta) + return resourceThingGroupRead(d, meta) } -func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig +func resourceThingGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).IoTConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig input := &iot.DescribeThingGroupInput{ ThingGroupName: aws.String(d.Id()), @@ -132,7 +146,7 @@ func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) erro out, err := conn.DescribeThingGroup(input) if err != nil { - if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { log.Printf("[WARN] IoT Thing Group %q not found, removing from state", d.Id()) d.SetId("") } @@ -151,20 +165,27 @@ func resourceAwsIotThingGroupRead(d *schema.ResourceData, meta interface{}) erro } d.Set("version", out.Version) - tags, err := keyvaluetags.IotListTags(conn, *out.ThingGroupArn) + tags, err := ListTags(conn, aws.StringValue(out.ThingGroupArn)) if err != nil { - return fmt.Errorf("error listing tags for Iot Thing Group (%s): %s", d.Id(), err) + return fmt.Errorf("error listing tags for IoT Thing Group (%s): %w", aws.StringValue(out.ThingGroupArn), err) } - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return fmt.Errorf("error setting tags_all: %w", err) } return nil } -func resourceAwsIotThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn +func resourceThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).IoTConn input := &iot.UpdateThingGroupInput{ ThingGroupName: aws.String(d.Get("name").(string)), @@ -174,26 +195,24 @@ func resourceAwsIotThingGroupUpdate(d *schema.ResourceData, meta interface{}) er input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) } - if d.HasChange("tags") { - oldTags, newTags := d.GetChange("tags") - - if v, ok := d.GetOk("arn"); ok { - if err := keyvaluetags.IotUpdateTags(conn, v.(string), oldTags, newTags); err != nil { - return fmt.Errorf("error updating Iot Thing Group (%s) tags: %s", d.Id(), err) - } - } - } - _, err := conn.UpdateThingGroup(input) if err != nil { return err } - return resourceAwsIotThingGroupRead(d, meta) + if 
d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + + return resourceThingGroupRead(d, meta) } -func resourceAwsIotThingGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn +func resourceThingGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).IoTConn input := &iot.DeleteThingGroupInput{ ThingGroupName: aws.String(d.Id()), @@ -202,7 +221,7 @@ func resourceAwsIotThingGroupDelete(d *schema.ResourceData, meta interface{}) er _, err := conn.DeleteThingGroup(input) if err != nil { - if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { return nil } return err @@ -218,7 +237,7 @@ func expandIotThingsGroupProperties(l []interface{}) *iot.ThingGroupProperties { if v, ok := m["attributes"]; ok { thingGroupProperties.AttributePayload = &iot.AttributePayload{ - Attributes: stringMapToPointers(v.(map[string]interface{})), + Attributes: flex.ExpandStringMap(v.(map[string]interface{})), } } diff --git a/internal/service/iot/thing_group_membership.go b/internal/service/iot/thing_group_membership.go index b9245696d187..a83626d01a34 100644 --- a/internal/service/iot/thing_group_membership.go +++ b/internal/service/iot/thing_group_membership.go @@ -1,4 +1,4 @@ -package aws +package iot import ( "fmt" @@ -7,17 +7,19 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" ) -func resourceAwsIotThingGroupAttachment() *schema.Resource { +func ResourceThingGroupMembership() *schema.Resource { return &schema.Resource{ - Create: resourceAwsIotThingGroupAttachmentCreate, - Read: resourceAwsIotThingGroupAttachmentRead, - Delete: resourceAwsIotThingGroupAttachmentDelete, + Create: resourceThingGroupMembershipCreate, + Read: resourceThingGroupMembershipRead, + Delete: resourceThingGroupMembershipDelete, Importer: &schema.ResourceImporter{ - State: resourceAwsIotThingGroupAttachmentImport, + State: resourceThingGroupMembershipImport, }, Schema: map[string]*schema.Schema{ @@ -40,8 +42,8 @@ func resourceAwsIotThingGroupAttachment() *schema.Resource { } } -func resourceAwsIotThingGroupAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn +func resourceThingGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).IoTConn params := &iot.AddThingToThingGroupInput{} params.ThingName = aws.String(d.Get("thing_name").(string)) @@ -59,18 +61,18 @@ func resourceAwsIotThingGroupAttachmentCreate(d *schema.ResourceData, meta inter d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-%s", *params.ThingName, *params.ThingGroupName))) - return resourceAwsIotThingGroupAttachmentRead(d, meta) + return resourceThingGroupMembershipRead(d, meta) } -func resourceAwsIotThingGroupAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn +func resourceThingGroupMembershipRead(d *schema.ResourceData, meta interface{}) 
error { + conn := meta.(*conns.AWSClient).IoTConn thingName := d.Get("thing_name").(string) thingGroupName := d.Get("thing_group_name").(string) - hasThingGroup, err := iotThingHasThingGroup(conn, thingName, thingGroupName, "") + hasThingGroup, err := IotThingHasThingGroup(conn, thingName, thingGroupName, "") - if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { log.Printf("[WARN] IoT Thing (%s) is not found", thingName) d.SetId("") return nil @@ -95,8 +97,8 @@ func resourceAwsIotThingGroupAttachmentRead(d *schema.ResourceData, meta interfa return nil } -func resourceAwsIotThingGroupAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iotconn +func resourceThingGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).IoTConn params := &iot.RemoveThingFromThingGroupInput{} params.ThingName = aws.String(d.Get("thing_name").(string)) @@ -111,7 +113,7 @@ func resourceAwsIotThingGroupAttachmentDelete(d *schema.ResourceData, meta inter return nil } -func resourceAwsIotThingGroupAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { +func resourceThingGroupMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { idParts := strings.SplitN(d.Id(), "/", 2) if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { return nil, fmt.Errorf("unexpected format of ID (%q), expected /", d.Id()) @@ -128,7 +130,7 @@ func resourceAwsIotThingGroupAttachmentImport(d *schema.ResourceData, meta inter return []*schema.ResourceData{d}, nil } -func iotThingHasThingGroup(conn *iot.IoT, thingName string, thingGroupName string, nextToken string) (bool, error) { +func IotThingHasThingGroup(conn *iot.IoT, thingName string, thingGroupName string, nextToken string) (bool, error) { maxResults := int64(20) params := &iot.ListThingGroupsForThingInput{ @@ -157,7 +159,7 @@ func iotThingHasThingGroup(conn *iot.IoT, thingName string, thingGroupName strin // then check if NextToken exists. If it is so call hasThingGroup // recursively to search in next part of list. 
Otherwise return false if out.NextToken != nil { - return iotThingHasThingGroup(conn, thingName, thingGroupName, *out.NextToken) + return IotThingHasThingGroup(conn, thingName, thingGroupName, *out.NextToken) } else { return false, nil } diff --git a/internal/service/iot/thing_group_membership_test.go b/internal/service/iot/thing_group_membership_test.go index 69fa53b795ae..eefb5d645947 100644 --- a/internal/service/iot/thing_group_membership_test.go +++ b/internal/service/iot/thing_group_membership_test.go @@ -1,35 +1,40 @@ -package aws +package iot_test import ( "fmt" "testing" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfiot "github.com/hashicorp/terraform-provider-aws/internal/service/iot" ) -func TestAccAWSIotThingGroupAttachment_basic(t *testing.T) { - rString := acctest.RandString(8) +func TestAccIoTThingGroupMembership_basic(t *testing.T) { + rString := sdkacctest.RandString(8) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupAttachmentDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupMembershipDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSIotThingGroupAttachmentConfig_basic(rString), + Config: testAccThingGroupMembershipConfig_basic(rString), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_iot_thing_group_attachment.test_attachment", "thing_name", fmt.Sprintf("test_thing_%s", rString)), - resource.TestCheckResourceAttr("aws_iot_thing_group_attachment.test_attachment", "thing_group_name", fmt.Sprintf("test_group_%s", rString)), - resource.TestCheckResourceAttr("aws_iot_thing_group_attachment.test_attachment", "override_dynamics_group", "false"), - testAccAWSIotThingGroupAttachmentExists_basic(rString), + resource.TestCheckResourceAttr("aws_iot_thing_group_membership.test_attachment", "thing_name", fmt.Sprintf("test_thing_%s", rString)), + resource.TestCheckResourceAttr("aws_iot_thing_group_membership.test_attachment", "thing_group_name", fmt.Sprintf("test_group_%s", rString)), + resource.TestCheckResourceAttr("aws_iot_thing_group_membership.test_attachment", "override_dynamics_group", "false"), + testAccCheckThingGroupMembershipExists(rString), ), }, { - ResourceName: "aws_iot_thing_group_attachment.test_attachment", - ImportStateIdFunc: testAccAWSIotThingGroupAttachmentImportStateIdFunc("aws_iot_thing_group_attachment.test_attachment"), + ResourceName: "aws_iot_thing_group_membership.test_attachment", + ImportStateIdFunc: testAccCheckThingGroupMembershipImportStateIdFunc("aws_iot_thing_group_membership.test_attachment"), ImportState: true, // We do not have a way to align IDs since the Create function uses resource.PrefixedUniqueId() // Failed state verification, resource with ID ROLE-POLICYARN not found @@ -39,19 +44,19 @@ func 
TestAccAWSIotThingGroupAttachment_basic(t *testing.T) { }) } -func testAccAWSIotThingGroupAttachmentExists_basic(rString string) resource.TestCheckFunc { +func testAccCheckThingGroupMembershipExists(rString string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).iotconn + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iot_thing_group_attachment" { + if rs.Type != "aws_iot_thing_group_membership" { continue } thingName := rs.Primary.Attributes["thing_name"] thingGroupName := rs.Primary.Attributes["thing_group_name"] - hasThingGroup, err := iotThingHasThingGroup(conn, thingName, thingGroupName, "") + hasThingGroup, err := tfiot.IotThingHasThingGroup(conn, thingName, thingGroupName, "") if err != nil { return err @@ -67,20 +72,20 @@ func testAccAWSIotThingGroupAttachmentExists_basic(rString string) resource.Test } } -func testAccCheckAWSIotThingGroupAttachmentDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).iotconn +func testAccCheckThingGroupMembershipDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iot_thing_group_attachment" { + if rs.Type != "aws_iot_thing_group_membership" { continue } thingName := rs.Primary.Attributes["thing_name"] thingGroupName := rs.Primary.Attributes["thing_group_name"] - hasThingGroup, err := iotThingHasThingGroup(conn, thingName, thingGroupName, "") + hasThingGroup, err := tfiot.IotThingHasThingGroup(conn, thingName, thingGroupName, "") - if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { return nil } @@ -95,7 +100,7 @@ func testAccCheckAWSIotThingGroupAttachmentDestroy(s *terraform.State) error { return nil } -func testAccAWSIotThingGroupAttachmentImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { +func testAccCheckThingGroupMembershipImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -106,7 +111,7 @@ func testAccAWSIotThingGroupAttachmentImportStateIdFunc(resourceName string) res } } -func testAccAWSIotThingGroupAttachmentConfig_basic(rString string) string { +func testAccThingGroupMembershipConfig_basic(rString string) string { return fmt.Sprintf(` resource "aws_iot_thing" "test_thing" { name = "test_thing_%s" @@ -122,7 +127,7 @@ resource "aws_iot_thing_group" "test_thing_group" { } } -resource "aws_iot_thing_group_attachment" "test_attachment" { +resource "aws_iot_thing_group_membership" "test_attachment" { thing_name = "${aws_iot_thing.test_thing.name}" thing_group_name = "${aws_iot_thing_group.test_thing_group.name}" override_dynamics_group = false diff --git a/internal/service/iot/thing_group_test.go b/internal/service/iot/thing_group_test.go index 6eb0ace55746..e768c1e28744 100644 --- a/internal/service/iot/thing_group_test.go +++ b/internal/service/iot/thing_group_test.go @@ -1,4 +1,4 @@ -package aws +package iot_test import ( "fmt" @@ -6,24 +6,28 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" ) -func TestAccAWSIotThingGroup_base(t *testing.T) { +func TestAccIoTThingGroup_base(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), @@ -44,20 +48,21 @@ func TestAccAWSIotThingGroup_base(t *testing.T) { }) } -func TestAccAWSIotThingGroup_full(t *testing.T) { +func TestAccIoTThingGroup_full(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) parentThingGroupName := thingGroupName + "_parent" resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { // BASE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), @@ -75,7 +80,7 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { ImportStateVerify: true, }, { // UPDATE full - Config: testAccAWSIotThingGroupConfig_full(thingGroupName, parentThingGroupName, "7", "this is my thing group", "myTag"), + Config: testAccThingGroupConfig_full(thingGroupName, parentThingGroupName, "7", "this is my thing group", "myTag"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), @@ -93,7 +98,7 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { ), }, { // DELETE full - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), @@ -109,19 +114,20 @@ func TestAccAWSIotThingGroup_full(t *testing.T) { }) } -func TestAccAWSIotThingGroup_name(t *testing.T) { +func TestAccIoTThingGroup_name(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) 
thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { // CREATE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), @@ -133,7 +139,7 @@ func TestAccAWSIotThingGroup_name(t *testing.T) { ImportStateVerify: true, }, { // UPDATE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName + "_updated"), + Config: testAccThingGroupConfig_base(thingGroupName + "_updated"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "name", thingGroupName+"_updated"), @@ -143,19 +149,20 @@ func TestAccAWSIotThingGroup_name(t *testing.T) { }) } -func TestAccAWSIotThingGroup_tags(t *testing.T) { +func TestAccIoTThingGroup_tags(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { // BASE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -167,7 +174,7 @@ func TestAccAWSIotThingGroup_tags(t *testing.T) { ImportStateVerify: true, }, { // CREATE Tags - Config: testAccAWSIotThingGroupConfig_withTags(thingGroupName, "myTag"), + Config: testAccThingGroupConfig_withTags(thingGroupName, "myTag"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -175,7 +182,7 @@ func TestAccAWSIotThingGroup_tags(t *testing.T) { ), }, { // UPDATE Tags - Config: testAccAWSIotThingGroupConfig_withTags(thingGroupName, "myUpdatedTag"), + Config: testAccThingGroupConfig_withTags(thingGroupName, "myUpdatedTag"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -183,7 +190,7 @@ func TestAccAWSIotThingGroup_tags(t *testing.T) { ), }, { // DELETE Tags - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -193,19 +200,20 @@ func TestAccAWSIotThingGroup_tags(t *testing.T) { }) } 
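// The three properties tests below share one lifecycle: start from the bare
// group, verify import, then create, update, and delete the properties block
// while checking the mirrored top-level attributes at each step.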
-func TestAccAWSIotThingGroup_propsAttr(t *testing.T) { +func TestAccIoTThingGroup_propsAttr(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { // BASE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -217,7 +225,7 @@ func TestAccAWSIotThingGroup_propsAttr(t *testing.T) { ImportStateVerify: true, }, { // CREATE Properties - Config: testAccAWSIotThingGroupConfig_withPropAttr(thingGroupName, "42"), + Config: testAccThingGroupConfig_withPropAttr(thingGroupName, "42"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -229,7 +237,7 @@ func TestAccAWSIotThingGroup_propsAttr(t *testing.T) { ), }, { // UPDATE Properties - Config: testAccAWSIotThingGroupConfig_withPropAttr(thingGroupName, "7"), + Config: testAccThingGroupConfig_withPropAttr(thingGroupName, "7"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -241,7 +249,7 @@ func TestAccAWSIotThingGroup_propsAttr(t *testing.T) { ), }, { // DELETE Properties - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -251,19 +259,20 @@ func TestAccAWSIotThingGroup_propsAttr(t *testing.T) { }) } -func TestAccAWSIotThingGroup_propsDesc(t *testing.T) { +func TestAccIoTThingGroup_propsDesc(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { // BASE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -275,7 +284,7 @@ func TestAccAWSIotThingGroup_propsDesc(t *testing.T) { ImportStateVerify: true, }, { // CREATE Properties - Config: testAccAWSIotThingGroupConfig_withPropDesc(thingGroupName, "this is my thing group"), + Config: 
testAccThingGroupConfig_withPropDesc(thingGroupName, "this is my thing group"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties.0.attributes"), @@ -283,7 +292,7 @@ func TestAccAWSIotThingGroup_propsDesc(t *testing.T) { ), }, { // UPDATE Properties - Config: testAccAWSIotThingGroupConfig_withPropDesc(thingGroupName, "this is my updated thing group"), + Config: testAccThingGroupConfig_withPropDesc(thingGroupName, "this is my updated thing group"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties.0.attributes"), @@ -291,7 +300,7 @@ func TestAccAWSIotThingGroup_propsDesc(t *testing.T) { ), }, { // DELETE Properties - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -301,19 +310,20 @@ func TestAccAWSIotThingGroup_propsDesc(t *testing.T) { }) } -func TestAccAWSIotThingGroup_propsAll(t *testing.T) { +func TestAccIoTThingGroup_propsAll(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { // BASE - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -325,7 +335,7 @@ func TestAccAWSIotThingGroup_propsAll(t *testing.T) { ImportStateVerify: true, }, { // CREATE Properties - Config: testAccAWSIotThingGroupConfig_withPropAll(thingGroupName, "42", "this is my thing group"), + Config: testAccThingGroupConfig_withPropAll(thingGroupName, "42", "this is my thing group"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -337,7 +347,7 @@ func TestAccAWSIotThingGroup_propsAll(t *testing.T) { ), }, { // UPDATE Properties - Config: testAccAWSIotThingGroupConfig_withPropAll(thingGroupName, "7", "this is my updated thing group"), + Config: testAccThingGroupConfig_withPropAll(thingGroupName, "7", "this is my updated thing group"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -349,7 +359,7 @@ func TestAccAWSIotThingGroup_propsAll(t *testing.T) { ), }, { // DELETE Properties - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "properties"), @@ -359,20 +369,21 @@ func 
TestAccAWSIotThingGroup_propsAll(t *testing.T) { }) } -func TestAccAWSIotThingGroup_parent(t *testing.T) { +func TestAccIoTThingGroup_parent(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := acctest.RandString(8) + rString := sdkacctest.RandString(8) thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) parentThingGroupName := thingGroupName + "_parent" resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIotThingGroupDestroy, + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), @@ -384,21 +395,21 @@ func TestAccAWSIotThingGroup_parent(t *testing.T) { ImportStateVerify: true, }, { // CREATE parent_group_name - Config: testAccAWSIotThingGroupConfig_withParent(thingGroupName, parentThingGroupName), + Config: testAccThingGroupConfig_withParent(thingGroupName, parentThingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName), ), }, { // UPDATE parent_group_name - Config: testAccAWSIotThingGroupConfig_withParent(thingGroupName, parentThingGroupName+"_updated"), + Config: testAccThingGroupConfig_withParent(thingGroupName, parentThingGroupName+"_updated"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName+"_updated"), ), }, { // DELETE parent_group_name - Config: testAccAWSIotThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig_base(thingGroupName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), @@ -419,7 +430,8 @@ func testAccCheckIotThingGroupExists(n string, thing *iot.DescribeThingGroupOutp return fmt.Errorf("no IoT Thing Group ID is set") } - conn := testAccProvider.Meta().(*AWSClient).iotconn + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn + input := &iot.DescribeThingGroupInput{ ThingGroupName: aws.String(rs.Primary.ID), } @@ -434,8 +446,8 @@ func testAccCheckIotThingGroupExists(n string, thing *iot.DescribeThingGroupOutp } } -func testAccCheckAWSIotThingGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).iotconn +func testAccCheckThingGroupDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_thing_group" { @@ -448,7 +460,7 @@ func testAccCheckAWSIotThingGroupDestroy(s *terraform.State) error { _, err := conn.DescribeThingGroup(input) if err != nil { - if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { return nil } return err @@ -458,7 +470,7 @@ func testAccCheckAWSIotThingGroupDestroy(s *terraform.State) error { return nil } -func 
testAccAWSIotThingGroupConfig_base(thingGroupName string) string {
+func testAccThingGroupConfig_base(thingGroupName string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "test" {
  name = "%s"
@@ -466,7 +478,7 @@ resource "aws_iot_thing_group" "test" {
`, thingGroupName)
}

-func testAccAWSIotThingGroupConfig_full(thingGroupName, parentThingGroupName, answer, description, tagValue string) string {
+func testAccThingGroupConfig_full(thingGroupName, parentThingGroupName, answer, description, tagValue string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "parent" {
  name = "%s"
@@ -492,7 +504,7 @@ resource "aws_iot_thing_group" "test" {
`, parentThingGroupName, thingGroupName, answer, description, tagValue)
}

-func testAccAWSIotThingGroupConfig_withTags(thingGroupName, tagValue string) string {
+func testAccThingGroupConfig_withTags(thingGroupName, tagValue string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "test" {
  name = "%s"
@@ -504,7 +516,7 @@ resource "aws_iot_thing_group" "test" {
`, thingGroupName, tagValue)
}

-func testAccAWSIotThingGroupConfig_withPropAttr(thingGroupName, answer string) string {
+func testAccThingGroupConfig_withPropAttr(thingGroupName, answer string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "test" {
  name = "%s"
@@ -521,7 +533,7 @@ resource "aws_iot_thing_group" "test" {
`, thingGroupName, answer)
}

-func testAccAWSIotThingGroupConfig_withPropDesc(thingGroupName, description string) string {
+func testAccThingGroupConfig_withPropDesc(thingGroupName, description string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "test" {
  name = "%s"
@@ -534,7 +546,7 @@ resource "aws_iot_thing_group" "test" {
`, thingGroupName, description)
}

-func testAccAWSIotThingGroupConfig_withPropAll(thingGroupName, answer, description string) string {
+func testAccThingGroupConfig_withPropAll(thingGroupName, answer, description string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "test" {
  name = "%s"
@@ -552,7 +564,7 @@ resource "aws_iot_thing_group" "test" {
`, thingGroupName, answer, description)
}

-func testAccAWSIotThingGroupConfig_withParent(thingGroupName, parentThingGroupName string) string {
+func testAccThingGroupConfig_withParent(thingGroupName, parentThingGroupName string) string {
	return fmt.Sprintf(`
resource "aws_iot_thing_group" "parent" {
  name = "%s"

From 5f7ab6cf1657d17f4ae6b67dbf8da5f3440f479c Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 16 Nov 2021 15:39:05 -0500
Subject: [PATCH 232/304] 'aws_iot_thing_group_attachment' -> 'aws_iot_thing_group_membership'.

---
 .../docs/r/iot_thing_group_membership.html.markdown | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/website/docs/r/iot_thing_group_membership.html.markdown b/website/docs/r/iot_thing_group_membership.html.markdown
index 437d59e8e61c..4a4348406e8d 100644
--- a/website/docs/r/iot_thing_group_membership.html.markdown
+++ b/website/docs/r/iot_thing_group_membership.html.markdown
@@ -1,18 +1,18 @@
 ---
 layout: "aws"
-page_title: "AWS: aws_iot_thing_group_attachment"
+page_title: "AWS: aws_iot_thing_group_membership"
 description: |-
   Allows adding an IoT Thing to an IoT Thing Group.
 ---

-# Resource: aws_iot_thing_group_attachment
+# Resource: aws_iot_thing_group_membership

Allows adding an IoT Thing to an IoT Thing Group.
## Example Usage

```hcl
-resource "aws_iot_thing_group_attachment" "test_attachment" {
+resource "aws_iot_thing_group_membership" "test_attachment" {
  thing_name = "test_thing_name"
  thing_group_name = "test_thing_group_name"
  override_dynamics_group = false
@@ -27,8 +27,8 @@ resource "aws_iot_thing_group_attachment" "test_attachment" {

## Import

-IOT Thing Group Attachment can be imported using the name of thing and thing group.
+IoT Thing Group Membership can be imported using the names of the thing and the thing group.

```
-$ terraform import aws_iot_thing_group_attachment.test_attachment thing_name/thing_group
+$ terraform import aws_iot_thing_group_membership.example thing_name/thing_group
```

From 2481f6c916fe6b7a51aefb0577db8828666a184c Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Tue, 16 Nov 2021 20:34:50 -0500
Subject: [PATCH 233/304] make: Change PKG_NAME to PKG

---
 GNUmakefile | 14 +++++++-------
 docs/contributing/contribution-checklists.md | 4 ++--
 .../running-and-writing-acceptance-tests.md | 6 +++---
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/GNUmakefile b/GNUmakefile
index bafced17bbbf..f4675e0cda23 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,7 +1,7 @@
 SWEEP?=us-west-2,us-east-1,us-east-2
 TEST?=./...
 SWEEP_DIR?=./internal/sweep
-PKG_NAME=internal
+PKG=internal
 TEST_COUNT?=1
 ACCTEST_TIMEOUT?=180m
 ACCTEST_PARALLELISM?=20
@@ -34,11 +34,11 @@ testacc: fmtcheck
		echo "See the contributing guide for more information: https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md"; \
		exit 1; \
	fi
-	TF_ACC=1 go test ./$(PKG_NAME)/... -v -count $(TEST_COUNT) -parallel $(ACCTEST_PARALLELISM) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT)
+	TF_ACC=1 go test ./$(PKG)/... -v -count $(TEST_COUNT) -parallel $(ACCTEST_PARALLELISM) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT)

 fmt:
	@echo "==> Fixing source code with gofmt..."
-	gofmt -s -w ./$(PKG_NAME) $(filter-out ./providerlint/go% ./providerlint/README.md ./providerlint/vendor, $(wildcard ./providerlint/*))
+	gofmt -s -w ./$(PKG) $(filter-out ./providerlint/go% ./providerlint/README.md ./providerlint/vendor, $(wildcard ./providerlint/*))

 # Currently required by tf-deploy compile
 fmtcheck:
@@ -87,7 +87,7 @@ lint: golangci-lint providerlint importlint

 golangci-lint:
	@echo "==> Checking source code with golangci-lint..."
-	@golangci-lint run ./$(PKG_NAME)/...
+	@golangci-lint run ./$(PKG)/...

 providerlint:
	@echo "==> Checking source code with providerlint..."
@@ -114,11 +114,11 @@ providerlint:
	-XR005=false \
	-XS001=false \
	-XS002=false \
-	./$(PKG_NAME)/service/... ./$(PKG_NAME)/provider/...
+	./$(PKG)/service/... ./$(PKG)/provider/...

 importlint:
	@echo "==> Checking source code with importlint..."
-	@impi --local . --scheme stdThirdPartyLocal ./$(PKG_NAME)/...
+	@impi --local . --scheme stdThirdPartyLocal ./$(PKG)/...

 tools:
	cd providerlint && go install .
@@ -133,7 +133,7 @@ tools:

 test-compile:
	@if [ "$(TEST)" = "./..." ]; then \
		echo "ERROR: Set TEST to a specific package.
For example,"; \ - echo " make test-compile TEST=./$(PKG_NAME)"; \ + echo " make test-compile TEST=./$(PKG)"; \ exit 1; \ fi go test -c $(TEST) $(TESTARGS) diff --git a/docs/contributing/contribution-checklists.md b/docs/contributing/contribution-checklists.md index 5b5df1cccf54..29368eb3c75a 100644 --- a/docs/contributing/contribution-checklists.md +++ b/docs/contributing/contribution-checklists.md @@ -547,7 +547,7 @@ More details about this code generation, including fixes for potential error mes } ``` -- Verify all acceptance testing passes for the resource (e.g., `make testacc TESTARGS='-run=TestAccEKSCluster_' PKG_NAME=internal/service/eks`) +- Verify all acceptance testing passes for the resource (e.g., `make testacc TESTARGS='-run=TestAccEKSCluster_' PKG=internal/service/eks`) ### Resource Tagging Documentation Implementation @@ -792,7 +792,7 @@ resource "aws_{service}_tag" "test" { } ``` -- Run `make testacc TESTARGS='-run=TestAcc{Service}Tags_' PKG_NAME=internal/service/{Service}` and ensure there are no failures. +- Run `make testacc TESTARGS='-run=TestAcc{Service}Tags_' PKG=internal/service/{Service}` and ensure there are no failures. - Create `website/docs/r/{service}_tag.html.markdown` with initial documentation similar to the following: ``````markdown diff --git a/docs/contributing/running-and-writing-acceptance-tests.md b/docs/contributing/running-and-writing-acceptance-tests.md index ae06a7714376..35d5a1acdf3b 100644 --- a/docs/contributing/running-and-writing-acceptance-tests.md +++ b/docs/contributing/running-and-writing-acceptance-tests.md @@ -94,7 +94,7 @@ Tests can then be run by specifying a regular expression defining the tests to run and the package in which the tests are defined: ```sh -$ make testacc TESTARGS='-run=TestAccCloudWatchDashboard_updateName' PKG_NAME=internal/service/cloudwatch +$ make testacc TESTARGS='-run=TestAccCloudWatchDashboard_updateName' PKG=internal/service/cloudwatch ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/cloudwatch/... -v -count 1 -parallel 20 -run=TestAccCloudWatchDashboard_updateName -timeout 180m === RUN TestAccCloudWatchDashboard_updateName @@ -111,7 +111,7 @@ write the regular expression. For example, to run all tests of the can start testing like this: ```sh -$ make testacc TESTARGS='-run=TestAccCloudWatchDashboard' PKG_NAME=internal/service/cloudwatch +$ make testacc TESTARGS='-run=TestAccCloudWatchDashboard' PKG=internal/service/cloudwatch ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/cloudwatch/... -v -count 1 -parallel 20 -run=TestAccCloudWatchDashboard -timeout 180m === RUN TestAccCloudWatchDashboard_basic @@ -141,7 +141,7 @@ Please Note: On macOS 10.14 and later (and some Linux distributions), the defaul Certain testing requires multiple AWS accounts. This additional setup is not typically required and the testing will return an error (shown below) if your current setup does not have the secondary AWS configuration: ```console -$ make testacc TESTARGS='-run=TestAccRDSInstance_DBSubnetGroupName_ramShared' PKG_NAME=internal/service/rds +$ make testacc TESTARGS='-run=TestAccRDSInstance_DBSubnetGroupName_ramShared' PKG=internal/service/rds TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 20 -run=TestAccRDSInstance_DBSubnetGroupName_ramShared -timeout 180m === RUN TestAccRDSInstance_DBSubnetGroupName_ramShared === PAUSE TestAccRDSInstance_DBSubnetGroupName_ramShared From 8d2bc3b9034e7311d7957b59ca6cb50910e5a0aa Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 16 Nov 2021 21:09:44 -0500 Subject: [PATCH 234/304] make: Add PKG and TESTS --- GNUmakefile | 34 ++++++++++++------- docs/contributing/contribution-checklists.md | 4 +-- .../running-and-writing-acceptance-tests.md | 6 ++-- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index f4675e0cda23..84b4b0bae833 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,10 +1,18 @@ -SWEEP?=us-west-2,us-east-1,us-east-2 -TEST?=./... -SWEEP_DIR?=./internal/sweep -PKG=internal -TEST_COUNT?=1 -ACCTEST_TIMEOUT?=180m -ACCTEST_PARALLELISM?=20 +SWEEP ?= us-west-2,us-east-1,us-east-2 +TEST ?= ./... +SWEEP_DIR ?= ./internal/sweep +PKG_NAME ?= internal +TEST_COUNT ?= 1 +ACCTEST_TIMEOUT ?= 180m +ACCTEST_PARALLELISM ?= 20 + +ifneq ($(origin PKG), undefined) + PKG_NAME = internal/service/$(PKG) +endif + +ifneq ($(origin TESTS), undefined) + TESTARGS = -run='$(TESTS)' +endif default: build @@ -34,11 +42,11 @@ testacc: fmtcheck echo "See the contributing guide for more information: https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md"; \ exit 1; \ fi - TF_ACC=1 go test ./$(PKG)/... -v -count $(TEST_COUNT) -parallel $(ACCTEST_PARALLELISM) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT) + TF_ACC=1 go test ./$(PKG_NAME)/... -v -count $(TEST_COUNT) -parallel $(ACCTEST_PARALLELISM) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT) fmt: @echo "==> Fixing source code with gofmt..." - gofmt -s -w ./$(PKG) $(filter-out ./providerlint/go% ./providerlint/README.md ./providerlint/vendor, $(wildcard ./providerlint/*)) + gofmt -s -w ./$(PKG_NAME) $(filter-out ./providerlint/go% ./providerlint/README.md ./providerlint/vendor, $(wildcard ./providerlint/*)) # Currently required by tf-deploy compile fmtcheck: @@ -87,7 +95,7 @@ lint: golangci-lint providerlint importlint golangci-lint: @echo "==> Checking source code with golangci-lint..." - @golangci-lint run ./$(PKG)/... + @golangci-lint run ./$(PKG_NAME)/... providerlint: @echo "==> Checking source code with providerlint..." @@ -114,11 +122,11 @@ providerlint: -XR005=false \ -XS001=false \ -XS002=false \ - ./$(PKG)/service/... ./$(PKG)/provider/... + ./$(PKG_NAME)/service/... ./$(PKG_NAME)/provider/... importlint: @echo "==> Checking source code with importlint..." - @impi --local . --scheme stdThirdPartyLocal ./$(PKG)/... + @impi --local . --scheme stdThirdPartyLocal ./$(PKG_NAME)/... tools: cd providerlint && go install . @@ -133,7 +141,7 @@ tools: test-compile: @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package. 
For example,"; \ - echo " make test-compile TEST=./$(PKG)"; \ + echo " make test-compile TEST=./$(PKG_NAME)"; \ exit 1; \ fi go test -c $(TEST) $(TESTARGS) diff --git a/docs/contributing/contribution-checklists.md b/docs/contributing/contribution-checklists.md index 29368eb3c75a..58e9e51dec5d 100644 --- a/docs/contributing/contribution-checklists.md +++ b/docs/contributing/contribution-checklists.md @@ -547,7 +547,7 @@ More details about this code generation, including fixes for potential error mes } ``` -- Verify all acceptance testing passes for the resource (e.g., `make testacc TESTARGS='-run=TestAccEKSCluster_' PKG=internal/service/eks`) +- Verify all acceptance testing passes for the resource (e.g., `make testacc TESTS=TestAccEKSCluster_ PKG=eks`) ### Resource Tagging Documentation Implementation @@ -792,7 +792,7 @@ resource "aws_{service}_tag" "test" { } ``` -- Run `make testacc TESTARGS='-run=TestAcc{Service}Tags_' PKG=internal/service/{Service}` and ensure there are no failures. +- Run `make testacc TESTS=TestAcc{Service}Tags_ PKG={Service}` and ensure there are no failures. - Create `website/docs/r/{service}_tag.html.markdown` with initial documentation similar to the following: ``````markdown diff --git a/docs/contributing/running-and-writing-acceptance-tests.md b/docs/contributing/running-and-writing-acceptance-tests.md index 35d5a1acdf3b..627e51738c5a 100644 --- a/docs/contributing/running-and-writing-acceptance-tests.md +++ b/docs/contributing/running-and-writing-acceptance-tests.md @@ -94,7 +94,7 @@ Tests can then be run by specifying a regular expression defining the tests to run and the package in which the tests are defined: ```sh -$ make testacc TESTARGS='-run=TestAccCloudWatchDashboard_updateName' PKG=internal/service/cloudwatch +$ make testacc TESTS=TestAccCloudWatchDashboard_updateName PKG=cloudwatch ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/cloudwatch/... -v -count 1 -parallel 20 -run=TestAccCloudWatchDashboard_updateName -timeout 180m === RUN TestAccCloudWatchDashboard_updateName @@ -111,7 +111,7 @@ write the regular expression. For example, to run all tests of the can start testing like this: ```sh -$ make testacc TESTARGS='-run=TestAccCloudWatchDashboard' PKG=internal/service/cloudwatch +$ make testacc TESTS=TestAccCloudWatchDashboard PKG=cloudwatch ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/cloudwatch/... -v -count 1 -parallel 20 -run=TestAccCloudWatchDashboard -timeout 180m === RUN TestAccCloudWatchDashboard_basic @@ -141,7 +141,7 @@ Please Note: On macOS 10.14 and later (and some Linux distributions), the defaul Certain testing requires multiple AWS accounts. This additional setup is not typically required and the testing will return an error (shown below) if your current setup does not have the secondary AWS configuration: ```console -$ make testacc TESTARGS='-run=TestAccRDSInstance_DBSubnetGroupName_ramShared' PKG=internal/service/rds +$ make testacc TESTS=TestAccRDSInstance_DBSubnetGroupName_ramShared PKG=rds TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 20 -run=TestAccRDSInstance_DBSubnetGroupName_ramShared -timeout 180m
=== RUN   TestAccRDSInstance_DBSubnetGroupName_ramShared
=== PAUSE TestAccRDSInstance_DBSubnetGroupName_ramShared

From 0b777926ac884cb731399bdc7bc9a9125b6db811 Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Tue, 16 Nov 2021 21:14:28 -0500
Subject: [PATCH 235/304] Update PR template

---
 .github/PULL_REQUEST_TEMPLATE.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 7e719c324c13..fea56fd896b2 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -17,10 +17,12 @@ Output from acceptance testing:

 ```
-$ make testacc TESTARGS='-run=TestAccXXX'
+$ make testacc TESTS=TestAccXXX PKG=ec2
 ...
 ```

From 7b98fdbe67c35166d7f9322b86680312ec078f01 Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Wed, 17 Nov 2021 02:56:33 +0000
Subject: [PATCH 236/304] Update CHANGELOG.md for #21802

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c08aa6d0aaeb..8a574ca5f33f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,7 @@ ENHANCEMENTS:
 * resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702))
 * resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support resource tagging ([#21769](https://github.com/hashicorp/terraform-provider-aws/issues/21769))
 * resource/aws_neptune_cluster: Support in-place update of `engine_version` ([#21760](https://github.com/hashicorp/terraform-provider-aws/issues/21760))
+* resource/aws_route53_resolver_dnssec_config: Increase resource creation and deletion timeouts to 10 minutes ([#21797](https://github.com/hashicorp/terraform-provider-aws/issues/21797))
 * resource/aws_sagemaker_endpoint: Add `deployment_config` argument ([#21765](https://github.com/hashicorp/terraform-provider-aws/issues/21765))

 BUG FIXES:

From 583516783d55a0ea2cff88976f86a674d8b26cef Mon Sep 17 00:00:00 2001
From: Shunsuke Tokunaga
Date: Thu, 18 Nov 2021 02:12:34 +0900
Subject: [PATCH 237/304] Add description for target type alb (#21812)

---
 website/docs/r/lb_target_group_attachment.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/lb_target_group_attachment.html.markdown b/website/docs/r/lb_target_group_attachment.html.markdown
index 1af3a61c71f5..b134fa3ced20 100644
--- a/website/docs/r/lb_target_group_attachment.html.markdown
+++ b/website/docs/r/lb_target_group_attachment.html.markdown
@@ -63,7 +63,7 @@ resource "aws_lb_target_group_attachment" "test" {
 The following arguments are supported:

 * `target_group_arn` - (Required) The ARN of the target group with which to register targets
-* `target_id` (Required) The ID of the target. This is the Instance ID for an instance, or the container ID for an ECS container. If the target type is ip, specify an IP address. If the target type is lambda, specify the arn of lambda.
+* `target_id` (Required) The ID of the target. This is the Instance ID for an instance, or the container ID for an ECS container. If the target type is ip, specify an IP address. If the target type is lambda, specify the ARN of the Lambda function. If the target type is alb, specify the ARN of the ALB.
 * `port` - (Optional) The port on which targets receive traffic.
 * `availability_zone` - (Optional) The Availability Zone where the IP address of the target is to be registered.
If the private ip address is outside of the VPC scope, this value must be set to 'all'.

From 2f6b948caa8ece13e3a72ae34fe723f03d29bca9 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Wed, 17 Nov 2021 12:44:41 -0500
Subject: [PATCH 238/304] Rename IoT finder source file.

---
 internal/service/iot/{iot.go => find.go} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename internal/service/iot/{iot.go => find.go} (100%)

diff --git a/internal/service/iot/iot.go b/internal/service/iot/find.go
similarity index 100%
rename from internal/service/iot/iot.go
rename to internal/service/iot/find.go

From 3d8baf737000467ad1b90367a19bac72a6d6b777 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Wed, 17 Nov 2021 12:49:27 -0500
Subject: [PATCH 239/304] 'AuthorizerByName' -> 'FindAuthorizerByName'.

---
 internal/service/iot/authorizer.go | 2 +-
 internal/service/iot/authorizer_test.go | 4 ++--
 internal/service/iot/find.go | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/internal/service/iot/authorizer.go b/internal/service/iot/authorizer.go
index 25de2318ec97..edecc1b02ce6 100644
--- a/internal/service/iot/authorizer.go
+++ b/internal/service/iot/authorizer.go
@@ -112,7 +112,7 @@ func resourceAuthorizerCreate(d *schema.ResourceData, meta interface{}) error {
 func resourceAuthorizerRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*conns.AWSClient).IoTConn

-	authorizer, err := AuthorizerByName(conn, d.Id())
+	authorizer, err := FindAuthorizerByName(conn, d.Id())

	if !d.IsNewResource() && tfresource.NotFound(err) {
		log.Printf("[WARN] IoT Authorizer (%s) not found, removing from state", d.Id())
diff --git a/internal/service/iot/authorizer_test.go b/internal/service/iot/authorizer_test.go
index b1c36c77e374..f93306f9474a 100644
--- a/internal/service/iot/authorizer_test.go
+++ b/internal/service/iot/authorizer_test.go
@@ -157,7 +157,7 @@ func testAccCheckAuthorizerExists(n string, v *iot.AuthorizerDescription) resour
	conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn

-	output, err := tfiot.AuthorizerByName(conn, rs.Primary.ID)
+	output, err := tfiot.FindAuthorizerByName(conn, rs.Primary.ID)

	if err != nil {
		return err
@@ -177,7 +177,7 @@ func testAccCheckAuthorizerDestroy(s *terraform.State) error {
		continue
	}

-	_, err := tfiot.AuthorizerByName(conn, rs.Primary.ID)
+	_, err := tfiot.FindAuthorizerByName(conn, rs.Primary.ID)

	if tfresource.NotFound(err) {
		continue
diff --git a/internal/service/iot/find.go b/internal/service/iot/find.go
index f5cf9528f88c..131ced0abec0 100644
--- a/internal/service/iot/find.go
+++ b/internal/service/iot/find.go
@@ -8,7 +8,7 @@ import (
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 )

-func AuthorizerByName(conn *iot.IoT, name string) (*iot.AuthorizerDescription, error) {
+func FindAuthorizerByName(conn *iot.IoT, name string) (*iot.AuthorizerDescription, error) {
	input := &iot.DescribeAuthorizerInput{
		AuthorizerName: aws.String(name),
	}

From b01b0863ded07986f6e9290a819de5be8fc493fc Mon Sep 17 00:00:00 2001
From: bozerkins
Date: Thu, 2 Sep 2021 16:11:06 +0300
Subject: [PATCH 240/304] support for DynamicPartitioningConfiguration in firehose resource

---
 internal/service/firehose/delivery_stream.go | 91 ++++++++++++++++++++
 1 file changed, 91 insertions(+)

diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go
index d58869f6d270..4d730487ff6f 100644
--- a/internal/service/firehose/delivery_stream.go
+++ b/internal/service/firehose/delivery_stream.go
@@ -58,6 +58,38 @@ func
cloudWatchLoggingOptionsSchema() *schema.Schema { } } +func dynamicPartitioningConfigurationSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "retry_options": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: validation.IntBetween(0, 7200), + }, + }, + }, + }, + }, + }, + } +} + func requestConfigurationSchema() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, @@ -278,6 +310,7 @@ func flattenFirehoseExtendedS3Configuration(description *firehose.ExtendedS3Dest "error_output_prefix": aws.StringValue(description.ErrorOutputPrefix), "prefix": aws.StringValue(description.Prefix), "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), + "dynamic_partitioning_configuration": flattenDynamicPartitioningConfiguration(description.DynamicPartitioningConfiguration), "role_arn": aws.StringValue(description.RoleARN), "s3_backup_configuration": flattenFirehoseS3Configuration(description.S3BackupDescription), "s3_backup_mode": aws.StringValue(description.S3BackupMode), @@ -664,6 +697,28 @@ func flattenProcessingConfiguration(pc *firehose.ProcessingConfiguration, roleAr return processingConfiguration } +func flattenDynamicPartitioningConfiguration(dpc *firehose.DynamicPartitioningConfiguration) []map[string]interface{} { + if dpc == nil { + return []map[string]interface{}{} + } + + dynamicPartitioningConfiguration := make([]map[string]interface{}, 1) + + retryOptions := make([]map[string]interface{}, 1) + if dpc.RetryOptions != nil && dpc.RetryOptions.DurationInSeconds != nil { + retryOptions[0] = map[string]interface{}{ + "duration_in_seconds": int(aws.Int64Value(dpc.RetryOptions.DurationInSeconds)), + } + } + + dynamicPartitioningConfiguration[0] = map[string]interface{}{ + "enabled": aws.Bool(*dpc.Enabled), + "retry_options": retryOptions, + } + + return dynamicPartitioningConfiguration +} + func flattenFirehoseKinesisSourceConfiguration(desc *firehose.KinesisStreamSourceDescription) []interface{} { if desc == nil { return []interface{}{} @@ -1215,6 +1270,8 @@ func ResourceDeliveryStream() *schema.Resource { "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), + "dynamic_partitioning_configuration": dynamicPartitioningConfigurationSchema(), + "processing_configuration": processingConfigurationSchema(), }, }, @@ -1627,6 +1684,10 @@ func createExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat configuration.ProcessingConfiguration = extractProcessingConfiguration(s3) } + if _, ok := s3["dynamic_partitioning_configuration"]; ok { + configuration.DynamicPartitioningConfiguration = extractDynamicPartitioningConfiguration(s3) + } + if _, ok := s3["cloudwatch_logging_options"]; ok { configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) } @@ -1712,6 +1773,10 @@ func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat ProcessingConfiguration: extractProcessingConfiguration(s3), } + if _, ok := s3["dynamic_partitioning_configuration"]; ok { + configuration.DynamicPartitioningConfiguration = extractDynamicPartitioningConfiguration(s3) + } + if _, ok := 
s3["cloudwatch_logging_options"]; ok { configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) } @@ -1906,6 +1971,32 @@ func expandFirehoseSchemaConfiguration(l []interface{}) *firehose.SchemaConfigur return config } +func extractDynamicPartitioningConfiguration(s3 map[string]interface{}) *firehose.DynamicPartitioningConfiguration { + config := s3["dynamic_partitioning_configuration"].([]interface{}) + if len(config) == 0 { + return nil + } + + dynamicPartitioningConfig := config[0].(map[string]interface{}) + DynamicPartitioningConfiguration := &firehose.DynamicPartitioningConfiguration{ + Enabled: aws.Bool(dynamicPartitioningConfig["enabled"].(bool)), + } + + if retryOptions, ok := dynamicPartitioningConfig["retry_options"]; ok { + DynamicPartitioningConfiguration.RetryOptions = extractRetryOptions(retryOptions.([]interface{})) + } + + return DynamicPartitioningConfiguration +} + +func extractRetryOptions(ro []interface{}) *firehose.RetryOptions { + options := ro[0].(map[string]interface{}) + + return &firehose.RetryOptions{ + DurationInSeconds: aws.Int64(int64(options["duration_in_seconds"].(int))), + } +} + func extractProcessingConfiguration(s3 map[string]interface{}) *firehose.ProcessingConfiguration { config := s3["processing_configuration"].([]interface{}) if len(config) == 0 || config[0] == nil { From 3c941982fc96767e8bf910ea3ed92541be72b5b3 Mon Sep 17 00:00:00 2001 From: Alex Novak Date: Wed, 3 Nov 2021 13:43:14 -0400 Subject: [PATCH 241/304] Resolve conflict --- internal/service/firehose/delivery_stream.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 4d730487ff6f..3887b5af1ea8 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -69,6 +69,7 @@ func dynamicPartitioningConfigurationSchema() *schema.Schema { Type: schema.TypeBool, Optional: true, Default: false, + ForceNew: true, }, "retry_options": { Type: schema.TypeList, @@ -325,6 +326,7 @@ func flattenFirehoseExtendedS3Configuration(description *firehose.ExtendedS3Dest m["kms_key_arn"] = aws.StringValue(description.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) } + log.Printf("Value of the extended s3 is %+v\n", m) return []map[string]interface{}{m} } @@ -1692,6 +1694,10 @@ func createExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) } + if _, ok := s3["dynamic_partitioning_configuration"]; ok { + configuration.DynamicPartitioningConfiguration = extractDynamicPartitioningConfiguration(s3) + } + if v, ok := s3["error_output_prefix"]; ok && v.(string) != "" { configuration.ErrorOutputPrefix = aws.String(v.(string)) } @@ -1781,6 +1787,10 @@ func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) } + if _, ok := s3["dynamic_partitioning_configuration"]; ok { + configuration.DynamicPartitioningConfiguration = extractDynamicPartitioningConfiguration(s3) + } + if v, ok := s3["error_output_prefix"]; ok && v.(string) != "" { configuration.ErrorOutputPrefix = aws.String(v.(string)) } From d5177c0e0195d7d29bd39d3672aea804bcdceb71 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 4 Nov 2021 12:11:35 -0400 Subject: [PATCH 242/304] Remove log --- internal/service/firehose/delivery_stream.go | 1 - 1 file changed, 1 
deletion(-) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 3887b5af1ea8..1e0960a752bf 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -326,7 +326,6 @@ func flattenFirehoseExtendedS3Configuration(description *firehose.ExtendedS3Dest m["kms_key_arn"] = aws.StringValue(description.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) } - log.Printf("Value of the extended s3 is %+v\n", m) return []map[string]interface{}{m} } From b9e2b669293279e53320edf041833f137cf5f1ff Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 4 Nov 2021 12:14:49 -0400 Subject: [PATCH 243/304] Update allowed, ForceNew should not be necessary --- internal/service/firehose/delivery_stream.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 1e0960a752bf..9246b5288087 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -69,7 +69,6 @@ func dynamicPartitioningConfigurationSchema() *schema.Schema { Type: schema.TypeBool, Optional: true, Default: false, - ForceNew: true, }, "retry_options": { Type: schema.TypeList, From 40844d765eda2923b2b9f5207649f22e78a6af7a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 4 Nov 2021 12:40:08 -0400 Subject: [PATCH 244/304] Add changelog --- .changelog/20769.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/20769.txt diff --git a/.changelog/20769.txt b/.changelog/20769.txt new file mode 100644 index 000000000000..ba8e58f5d00d --- /dev/null +++ b/.changelog/20769.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_kinesis_firehose_delivery_stream: Add `dynamic_partitioning_configuration` configuration block +``` \ No newline at end of file From fc00e6fcb8bd86617c7581c8a418f0653124eea5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 17 Nov 2021 13:50:37 -0500 Subject: [PATCH 245/304] Add and use 'FindThingGroupByName'. 
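
Thing group lookup now goes through a finder that maps ResourceNotFoundException
to a standard NotFoundError. A minimal sketch of the call pattern the Read
function and the acceptance tests below adopt (illustrative only):

    output, err := FindThingGroupByName(conn, d.Id())
    if !d.IsNewResource() && tfresource.NotFound(err) {
        // The group disappeared out of band; remove it from state.
        d.SetId("")
        return nil
    }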
--- internal/service/iot/find.go | 25 ++++++++++++++++++ internal/service/iot/thing_group.go | 32 +++++++++++------------- internal/service/iot/thing_group_test.go | 32 +++++++++++------------- 3 files changed, 55 insertions(+), 34 deletions(-) diff --git a/internal/service/iot/find.go b/internal/service/iot/find.go index 131ced0abec0..aba3a4062954 100644 --- a/internal/service/iot/find.go +++ b/internal/service/iot/find.go @@ -32,3 +32,28 @@ func FindAuthorizerByName(conn *iot.IoT, name string) (*iot.AuthorizerDescriptio return output.AuthorizerDescription, nil } + +func FindThingGroupByName(conn *iot.IoT, name string) (*iot.DescribeThingGroupOutput, error) { + input := &iot.DescribeThingGroupInput{ + ThingGroupName: aws.String(name), + } + + output, err := conn.DescribeThingGroup(input) + + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} \ No newline at end of file diff --git a/internal/service/iot/thing_group.go b/internal/service/iot/thing_group.go index d043542736ef..f77ebfefb0d5 100644 --- a/internal/service/iot/thing_group.go +++ b/internal/service/iot/thing_group.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -139,35 +140,32 @@ func resourceThingGroupRead(d *schema.ResourceData, meta interface{}) error { defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - input := &iot.DescribeThingGroupInput{ - ThingGroupName: aws.String(d.Id()), + output, err := FindThingGroupByName(conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] IoT Thing Group (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - log.Printf("[DEBUG] Reading IoT Thing Group: %s", input) - out, err := conn.DescribeThingGroup(input) if err != nil { - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] IoT Thing Group %q not found, removing from state", d.Id()) - d.SetId("") - } - return err + return fmt.Errorf("error reading IoT Thing Group (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] Received IoT Thing Group: %s", out) - d.Set("arn", out.ThingGroupArn) - d.Set("name", out.ThingGroupName) + d.Set("arn", output.ThingGroupArn) + d.Set("name", output.ThingGroupName) - if err := d.Set("metadata", flattenIotThingGroupMetadata(out.ThingGroupMetadata)); err != nil { + if err := d.Set("metadata", flattenIotThingGroupMetadata(output.ThingGroupMetadata)); err != nil { return fmt.Errorf("error setting metadata: %s", err) } - if err := d.Set("properties", flattenIotThingGroupProperties(out.ThingGroupProperties)); err != nil { + if err := d.Set("properties", flattenIotThingGroupProperties(output.ThingGroupProperties)); err != nil { return fmt.Errorf("error setting properties: %s", err) } - d.Set("version", out.Version) + d.Set("version", output.Version) - tags, err := ListTags(conn, aws.StringValue(out.ThingGroupArn)) + tags, err := ListTags(conn, d.Get("arn").(string)) if err != nil { - return fmt.Errorf("error 
listing tags for IoT Thing Group (%s): %w", d.Get("arn").(string), err)
	}

	tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig)
diff --git a/internal/service/iot/thing_group_test.go b/internal/service/iot/thing_group_test.go
index e768c1e28744..685ba4d05dcb 100644
--- a/internal/service/iot/thing_group_test.go
+++ b/internal/service/iot/thing_group_test.go
@@ -4,14 +4,14 @@ import (
	"fmt"
	"testing"

-	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iot"
-	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
	sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	tfiot "github.com/hashicorp/terraform-provider-aws/internal/service/iot"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 )

 func TestAccIoTThingGroup_base(t *testing.T) {
@@ -419,28 +419,26 @@ func TestAccIoTThingGroup_parent(t *testing.T) {
	})
}

-func testAccCheckIotThingGroupExists(n string, thing *iot.DescribeThingGroupOutput) resource.TestCheckFunc {
+func testAccCheckIotThingGroupExists(n string, v *iot.DescribeThingGroupOutput) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
-			return fmt.Errorf("not found: %s", n)
+			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
-			return fmt.Errorf("no IoT Thing Group ID is set")
+			return fmt.Errorf("No IoT Thing Group ID is set")
		}

		conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn

-		input := &iot.DescribeThingGroupInput{
-			ThingGroupName: aws.String(rs.Primary.ID),
-		}
-		resp, err := conn.DescribeThingGroup(input)
+		output, err := tfiot.FindThingGroupByName(conn, rs.Primary.ID)
+
		if err != nil {
			return err
		}

-		*thing = *resp
+		*v = *output

		return nil
	}
@@ -454,19 +452,19 @@ func testAccCheckThingGroupDestroy(s *terraform.State) error {
			continue
		}

-		input := &iot.DescribeThingGroupInput{
-			ThingGroupName: aws.String(rs.Primary.ID),
+		_, err := tfiot.FindThingGroupByName(conn, rs.Primary.ID)
+
+		if tfresource.NotFound(err) {
+			continue
		}

-		_, err := conn.DescribeThingGroup(input)
		if err != nil {
-			if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) {
-				return nil
-			}
			return err
		}

-		return fmt.Errorf("expected IoT Thing Group to be destroyed, %s found", rs.Primary.ID)
+
+		return fmt.Errorf("IoT Thing Group %s still exists", rs.Primary.ID)
	}
+
	return nil
}

From 6f2ffee06a20fbba1fb79251e7f7d6f46063634a Mon Sep 17 00:00:00 2001
From: Ahmad Samiei
Date: Wed, 17 Nov 2021 19:56:12 +0100
Subject: [PATCH 246/304] Add id attribute to server_certificate_data doc (#21547)

The `id` attribute is already present[1][2], but not documented. If not more,
at least the `aws_cloudfront_distribution` resource needs[3] to use
`aws_iam_server_certificate.id`, so it makes sense to document it.
[1] https://github.com/hashicorp/terraform-provider-aws/blob/187f1659a4fef11ac314567273b5470afe6b662f/internal/service/iam/server_certificate_data_source.go#L138 [2] https://github.com/hashicorp/terraform-provider-aws/blob/187f1659a4fef11ac314567273b5470afe6b662f/internal/service/iam/server_certificate_data_source_test.go#L56 [3] https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#iam_certificate_id --- website/docs/d/iam_server_certificate.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/d/iam_server_certificate.html.markdown b/website/docs/d/iam_server_certificate.html.markdown index 806c302bdd26..83aa4fcf1994 100644 --- a/website/docs/d/iam_server_certificate.html.markdown +++ b/website/docs/d/iam_server_certificate.html.markdown @@ -40,6 +40,7 @@ resource "aws_elb" "elb" { ## Attributes Reference +* `id` is set to the unique id of the IAM Server Certificate * `arn` is set to the ARN of the IAM Server Certificate * `path` is set to the path of the IAM Server Certificate * `expiration_date` is set to the expiration date of the IAM Server Certificate From 4db70fa4929a36f2207f62ffc17d859428bf09ff Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 16:02:59 -0500 Subject: [PATCH 247/304] tests/firehose: Add dynamic partitioning test --- internal/service/firehose/delivery_stream.go | 37 +++------ .../service/firehose/delivery_stream_test.go | 83 +++++++++++++++++++ 2 files changed, 96 insertions(+), 24 deletions(-) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 9246b5288087..bf404c7c3b82 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -70,20 +70,11 @@ func dynamicPartitioningConfigurationSchema() *schema.Schema { Optional: true, Default: false, }, - "retry_options": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "duration_in_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - ValidateFunc: validation.IntBetween(0, 7200), - }, - }, - }, + "retry_duration": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: validation.IntBetween(0, 7200), }, }, }, @@ -704,16 +695,12 @@ func flattenDynamicPartitioningConfiguration(dpc *firehose.DynamicPartitioningCo dynamicPartitioningConfiguration := make([]map[string]interface{}, 1) - retryOptions := make([]map[string]interface{}, 1) - if dpc.RetryOptions != nil && dpc.RetryOptions.DurationInSeconds != nil { - retryOptions[0] = map[string]interface{}{ - "duration_in_seconds": int(aws.Int64Value(dpc.RetryOptions.DurationInSeconds)), - } + dynamicPartitioningConfiguration[0] = map[string]interface{}{ + "enabled": aws.BoolValue(dpc.Enabled), } - dynamicPartitioningConfiguration[0] = map[string]interface{}{ - "enabled": aws.Bool(*dpc.Enabled), - "retry_options": retryOptions, + if dpc.RetryOptions != nil && dpc.RetryOptions.DurationInSeconds != nil { + dynamicPartitioningConfiguration[0]["retry_duration"] = int(aws.Int64Value(dpc.RetryOptions.DurationInSeconds)) } return dynamicPartitioningConfiguration @@ -1990,8 +1977,10 @@ func extractDynamicPartitioningConfiguration(s3 map[string]interface{}) *firehos Enabled: aws.Bool(dynamicPartitioningConfig["enabled"].(bool)), } - if retryOptions, ok := dynamicPartitioningConfig["retry_options"]; ok { - DynamicPartitioningConfiguration.RetryOptions = 
extractRetryOptions(retryOptions.([]interface{})) + if retryDuration, ok := dynamicPartitioningConfig["retry_duration"]; ok { + DynamicPartitioningConfiguration.RetryOptions = &firehose.RetryOptions{ + DurationInSeconds: aws.Int64(int64(retryDuration.(int))), + } } return DynamicPartitioningConfiguration diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index b1afae3c8846..155bd42060e8 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -858,6 +858,42 @@ func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { }) } +func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { + //rString := sdkacctest.RandString(8) + //funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) + //policyName := fmt.Sprintf("tf_acc_policy_%s", rString) + //roleName := fmt.Sprintf("tf_acc_role_%s", rString) + resourceName := "aws_kinesis_firehose_delivery_stream.test" + + var stream firehose.DeliveryStreamDescription + ri := sdkacctest.RandInt() + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + Steps: []resource.TestStep{ + { + Config: testAccKinesisFirehoseDeliveryStreamConfig_extendedS3DynamicPartitioning(rName, ri), + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), + resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.dynamic_partitioning_configuration.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { rString := sdkacctest.RandString(8) funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) @@ -2594,6 +2630,53 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } ` +func testAccKinesisFirehoseDeliveryStreamConfig_extendedS3DynamicPartitioning(rName string, rInt int) string { + return acctest.ConfigCompose( + testAccLambdaBasicConfig(rName, rName, rName), + fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamBaseConfig+` +resource "aws_kinesis_firehose_delivery_stream" "test" { + depends_on = [aws_iam_role_policy.firehose] + name = "terraform-kinesis-firehose-basictest-%d" + destination = "extended_s3" + + extended_s3_configuration { + role_arn = aws_iam_role.firehose.arn + bucket_arn = aws_s3_bucket.bucket.arn + prefix = "custom-prefix/customerId=!{partitionKeyFromLambda:customerId}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/" + error_output_prefix = "prefix1" + buffer_size = 64 + + dynamic_partitioning_configuration { + enabled = true + retry_duration = 300 + } + + processing_configuration { + enabled = true + + processors { + type = "Lambda" + + parameters { + parameter_name = "LambdaArn" + parameter_value = "${aws_lambda_function.lambda_function_test.arn}:$LATEST" + } + } + + processors { + type = "RecordDeAggregation" + + parameters { + parameter_name = 
"SubRecordType" + parameter_value = "JSON" + } + } + } + } +} +`, rInt, rInt, rInt, rInt)) +} + var testAccKinesisFirehoseDeliveryStreamConfig_extendedS3Updates_Initial = testAccKinesisFirehoseDeliveryStreamBaseConfig + ` resource "aws_kinesis_firehose_delivery_stream" "test" { depends_on = [aws_iam_role_policy.firehose] From 18cd3f5e9fbf482bcdec6c6de3f6a735543c3ebf Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 16:28:46 -0500 Subject: [PATCH 248/304] tests/firehose: Standardize --- .../service/firehose/delivery_stream_test.go | 570 ++++++++++-------- 1 file changed, 305 insertions(+), 265 deletions(-) diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index 155bd42060e8..6fc2bb51ea72 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -23,25 +23,23 @@ func TestAccFirehoseDeliveryStream_basic(t *testing.T) { resourceName := "aws_kinesis_firehose_delivery_stream.test" rInt := sdkacctest.RandInt() - funcName := fmt.Sprintf("aws_kinesis_firehose_ds_import_%d", rInt) - policyName := fmt.Sprintf("tf_acc_policy_%d", rInt) - roleName := fmt.Sprintf("tf_acc_role_%d", rInt) var stream firehose.DeliveryStreamDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - config := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3basic, + config := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, rInt, rInt, rInt, rInt) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -71,25 +69,23 @@ func TestAccFirehoseDeliveryStream_disappears(t *testing.T) { resourceName := "aws_kinesis_firehose_delivery_stream.test" rInt := sdkacctest.RandInt() - funcName := fmt.Sprintf("aws_kinesis_firehose_ds_import_%d", rInt) - policyName := fmt.Sprintf("tf_acc_policy_%d", rInt) - roleName := fmt.Sprintf("tf_acc_role_%d", rInt) var stream firehose.DeliveryStreamDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - config := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3basic, + config := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, rInt, rInt, rInt, rInt) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), acctest.CheckResourceDisappears(acctest.Provider, tffirehose.ResourceDeliveryStream(), resourceName), ), 
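// CheckResourceDisappears deletes the delivery stream out of band, so the follow-up plan is expected to be non-empty.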
ExpectNonEmptyPlan: true, @@ -102,19 +98,19 @@ func TestAccFirehoseDeliveryStream_s3basic(t *testing.T) { var stream firehose.DeliveryStreamDescription ri := sdkacctest.RandInt() resourceName := "aws_kinesis_firehose_delivery_stream.test" - config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, + config := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -126,7 +122,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { var stream firehose.DeliveryStreamDescription rInt := sdkacctest.RandInt() rName := fmt.Sprintf("terraform-kinesis-firehose-basictest-%d", rInt) - config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, + config := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, rInt, rInt, rInt, rInt) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -134,12 +130,12 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSE(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, true), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), @@ -147,9 +143,9 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { ), }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "false"), @@ -160,9 +156,9 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { PlanOnly: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSE(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, true), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, 
nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), @@ -183,12 +179,12 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, rInt, true), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), @@ -197,18 +193,18 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { ), }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "false"), ), }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, rInt, true), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), @@ -230,12 +226,12 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, rInt, true, firehose.KeyTypeAwsOwnedCmk), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, rInt, true, firehose.KeyTypeAwsOwnedCmk), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), @@ -243,18 +239,18 @@ func 
TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { ), }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "false"), ), }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, rInt, true, firehose.KeyTypeAwsOwnedCmk), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, rInt, true, firehose.KeyTypeAwsOwnedCmk), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), @@ -269,7 +265,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { var stream firehose.DeliveryStreamDescription rInt := sdkacctest.RandInt() rName := fmt.Sprintf("terraform-kinesis-firehose-basictest-%d", rInt) - config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, + config := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, rInt, rInt, rInt, rInt) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -277,21 +273,21 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithTags(rName, rInt), + Config: testAccDeliveryStreamConfig_s3basicWithTags(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.Usage", "original"), ), }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3basicWithTagsChanged(rName, rInt), + Config: testAccDeliveryStreamConfig_s3basicWithTagsChanged(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.Usage", "changed"), @@ -300,7 +296,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, 
nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -313,19 +309,19 @@ func TestAccFirehoseDeliveryStream_s3KinesisStreamSource(t *testing.T) { var stream firehose.DeliveryStreamDescription ri := sdkacctest.RandInt() resourceName := "aws_kinesis_firehose_delivery_stream.test" - config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3KinesisStreamSource, + config := fmt.Sprintf(testAccDeliveryStreamConfig_s3KinesisStreamSource, ri, ri, ri, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -342,12 +338,12 @@ func TestAccFirehoseDeliveryStream_s3WithCloudWatchLogging(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3WithCloudwatchLogging(ri), + Config: testAccDeliveryStreamConfig_s3WithCloudwatchLogging(ri), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -360,9 +356,9 @@ func TestAccFirehoseDeliveryStream_s3Updates(t *testing.T) { resourceName := "aws_kinesis_firehose_delivery_stream.test" ri := sdkacctest.RandInt() - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, + preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates, + postConfig := fmt.Sprintf(testAccDeliveryStreamConfig_s3Updates, ri, ri, ri, ri) updatedS3DestinationConfig := &firehose.S3DestinationDescription{ @@ -376,19 +372,19 @@ func TestAccFirehoseDeliveryStream_s3Updates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil, nil, nil, nil, nil), ), }, @@ -397,28 +393,25 @@ func TestAccFirehoseDeliveryStream_s3Updates(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3basic(t *testing.T) { - rString := 
sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) resourceName := "aws_kinesis_firehose_delivery_stream.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) var stream firehose.DeliveryStreamDescription ri := sdkacctest.RandInt() - config := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3basic, + config := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.error_output_prefix", ""), @@ -443,12 +436,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, false), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, false), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.enabled", "false"), @@ -460,18 +453,18 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes ImportStateVerify: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, true), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, true), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.enabled", "true"), ), }, { - Config: 
testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, false), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, false), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.enabled", "false"), @@ -491,12 +484,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.#", "1"), @@ -529,9 +522,9 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { t.Fatalf("Unable to update firehose destination: %s", err) } }, - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.#", "1"), @@ -551,12 +544,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_up PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.input_format_configuration.#", "1"), @@ -570,9 +563,9 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_up ImportStateVerify: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.input_format_configuration.#", "1"), @@ -594,12 +587,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionHiveJSONSerDe_e PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.input_format_configuration.#", "1"), @@ -626,12 +619,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOpenXJSONSerDe_ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.input_format_configuration.#", "1"), @@ -658,12 +651,12 @@ func 
TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOrcSerDe_empty( PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.output_format_configuration.#", "1"), @@ -690,12 +683,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionParquetSerDe_em PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.output_format_configuration.#", "1"), @@ -722,12 +715,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_upda PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.output_format_configuration.#", "1"), @@ -741,9 +734,9 @@ func 
TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_upda ImportStateVerify: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.data_format_conversion_configuration.0.output_format_configuration.#", "1"), @@ -765,12 +758,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, rInt, "prefix1"), + Config: testAccDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, rInt, "prefix1"), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.error_output_prefix", "prefix1"), ), @@ -781,9 +774,9 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, rInt, "prefix2"), + Config: testAccDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, rInt, "prefix2"), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.error_output_prefix", "prefix2"), ), @@ -803,12 +796,12 @@ func TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ExtendedS3_ProcessingConfiguration_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_ProcessingConfiguration_Empty(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.#", "1"), ), @@ -823,28 +816,26 @@ func 
TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { - rString := sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) resourceName := "aws_kinesis_firehose_delivery_stream.test" var stream firehose.DeliveryStreamDescription ri := sdkacctest.RandInt() - config := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3KmsKeyArn, + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + config := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3KmsKeyArn, ri, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttrPair(resourceName, "extended_s3_configuration.0.kms_key_arn", "aws_kms_key.test", "arn"), ), @@ -874,12 +865,12 @@ func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_extendedS3DynamicPartitioning(rName, ri), + Config: testAccDeliveryStreamConfig_extendedS3DynamicPartitioning(rName, ri), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.dynamic_partitioning_configuration.#", "1"), @@ -895,23 +886,20 @@ func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { - rString := sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) resourceName := "aws_kinesis_firehose_delivery_stream.test" var stream firehose.DeliveryStreamDescription ri := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - preConfig := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3basic, + preConfig := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, ri, ri, ri, ri) - firstUpdateConfig := 
testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3Updates_Initial, + firstUpdateConfig := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3Updates_Initial, ri, ri, ri, ri) - removeProcessorsConfig := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3Updates_RemoveProcessors, + removeProcessorsConfig := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3Updates_RemoveProcessors, ri, ri, ri, ri) firstUpdateExtendedS3DestinationConfig := &firehose.ExtendedS3DestinationDescription{ @@ -952,12 +940,12 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3, + CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -969,14 +957,14 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { { Config: firstUpdateConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, firstUpdateExtendedS3DestinationConfig, nil, nil, nil, nil), ), }, { Config: removeProcessorsConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, removeProcessorsExtendedS3DestinationConfig, nil, nil, nil, nil), ), }, @@ -988,19 +976,19 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_kinesisStreamSource(t *testing.T) var stream firehose.DeliveryStreamDescription ri := sdkacctest.RandInt() resourceName := "aws_kinesis_firehose_delivery_stream.test" - config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_extendedS3_KinesisStreamSource, + config := fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3_KinesisStreamSource, ri, ri, ri, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: config, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1044,12 +1032,12 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: 
testAccKinesisFirehoseDeliveryStreamRedshiftConfig(rName, rInt), + Config: testAccDeliveryStreamRedshiftConfig(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1060,9 +1048,9 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { ImportStateVerifyIgnore: []string{"redshift_configuration.0.password"}, }, { - Config: testAccKinesisFirehoseDeliveryStreamRedshiftConfigUpdates(rName, rInt), + Config: testAccDeliveryStreamRedshiftConfigUpdates(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, updatedRedshiftConfig, nil, nil, nil), ), }, @@ -1075,15 +1063,12 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { ri := sdkacctest.RandInt() resourceName := "aws_kinesis_firehose_delivery_stream.test" - rString := sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_SplunkBasic, + preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_SplunkBasic, ri, ri, ri, ri) - postConfig := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_SplunkUpdates, + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + postConfig := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_SplunkUpdates, ri, ri, ri, ri) updatedSplunkConfig := &firehose.SplunkDestinationDescription{ @@ -1110,12 +1095,12 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1127,7 +1112,7 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, updatedSplunkConfig, nil), ), }, @@ -1140,15 +1125,13 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { ri := sdkacctest.RandInt() resourceName := "aws_kinesis_firehose_delivery_stream.test" - rString := sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_HTTPEndpointBasic, + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + preConfig := 
fmt.Sprintf(testAccDeliveryStreamConfig_HTTPEndpointBasic, ri, ri, ri, ri) - postConfig := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_HTTPEndpointUpdates, + postConfig := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_HTTPEndpointUpdates, ri, ri, ri, ri) updatedHTTPEndpointConfig := &firehose.HttpEndpointDestinationDescription{ @@ -1177,12 +1160,12 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1194,7 +1177,7 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, updatedHTTPEndpointConfig), ), }, @@ -1211,12 +1194,12 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rInt, 301), + Config: testAccDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rInt, 301), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), ), }, { @@ -1225,9 +1208,9 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rInt, 302), + Config: testAccDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rInt, 302), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), ), }, }, @@ -1239,14 +1222,12 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { resourceName := "aws_kinesis_firehose_delivery_stream.test" ri := sdkacctest.RandInt() - rString := sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchBasic, + + preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchBasic, ri, ri, ri, ri, ri, ri) - postConfig := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchUpdate, + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + postConfig := testAccLambdaBasicConfigNew(rName) + + 
fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchUpdate, ri, ri, ri, ri, ri, ri) updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ @@ -1273,12 +1254,12 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1290,7 +1271,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), ), }, @@ -1303,14 +1284,12 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { resourceName := "aws_kinesis_firehose_delivery_stream.test" ri := sdkacctest.RandInt() - rString := sdkacctest.RandString(8) - funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchEndpoint, + + preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchEndpoint, ri, ri, ri, ri, ri, ri) - postConfig := testAccLambdaBasicConfig(funcName, policyName, roleName) + - fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchEndpointUpdate, + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + postConfig := testAccLambdaBasicConfigNew(rName) + + fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchEndpointUpdate, ri, ri, ri, ri, ri, ri) updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ @@ -1337,12 +1316,12 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1354,7 +1333,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), ), }, @@ -1369,10 +1348,8 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { resourceName := "aws_kinesis_firehose_delivery_stream.test" ri := sdkacctest.RandInt() - rString := sdkacctest.RandString(8) - 
funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - roleName := fmt.Sprintf("tf_acc_role_%s", rString) + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ BufferingHints: &firehose.ElasticsearchBufferingHints{ @@ -1398,12 +1375,12 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { PreCheck: func() { acctest.PreCheck(t); testAccPreCheckIamServiceLinkedRoleEs(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchVpcBasic(ri), + Config: testAccDeliveryStreamConfig_ElasticsearchVpcBasic(ri), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), resource.TestCheckResourceAttrPair(resourceName, "elasticsearch_configuration.0.vpc_config.0.vpc_id", "aws_vpc.elasticsearch_in_vpc", "id"), resource.TestCheckResourceAttr(resourceName, "elasticsearch_configuration.0.vpc_config.0.subnet_ids.#", "2"), @@ -1417,9 +1394,9 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchVpcUpdate(funcName, policyName, roleName, ri), + Config: testAccDeliveryStreamConfig_ElasticsearchVpcUpdate(rName, ri), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), resource.TestCheckResourceAttrPair(resourceName, "elasticsearch_configuration.0.vpc_config.0.vpc_id", "aws_vpc.elasticsearch_in_vpc", "id"), resource.TestCheckResourceAttr(resourceName, "elasticsearch_configuration.0.vpc_config.0.subnet_ids.#", "2"), @@ -1441,12 +1418,12 @@ func TestAccFirehoseDeliveryStream_missingProcessing(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccKinesisFirehoseDeliveryStreamConfig_missingProcessingConfiguration(ri), + Config: testAccDeliveryStreamConfig_missingProcessingConfiguration(ri), Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists(resourceName, &stream), + testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, @@ -1459,7 +1436,7 @@ func TestAccFirehoseDeliveryStream_missingProcessing(t *testing.T) { }) } -func testAccCheckKinesisFirehoseDeliveryStreamExists(n string, v *firehose.DeliveryStreamDescription) resource.TestCheckFunc { +func testAccCheckDeliveryStreamExists(n string, v *firehose.DeliveryStreamDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -1658,8 +1635,8 @@ func 
testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip
 	}
 }
 
-func testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3(s *terraform.State) error {
-	err := testAccCheckKinesisFirehoseDeliveryStreamDestroy(s)
+func testAccCheckDeliveryStreamDestroy_ExtendedS3(s *terraform.State) error {
+	err := testAccCheckDeliveryStreamDestroy(s)
 
 	if err == nil {
 		err = testAccCheckFirehoseLambdaFunctionDestroy(s)
@@ -1668,7 +1645,7 @@ func testAccCheckKinesisFirehoseDeliveryStreamDestroy_ExtendedS3(s *terraform.St
 	return err
 }
 
-func testAccCheckKinesisFirehoseDeliveryStreamDestroy(s *terraform.State) error {
+func testAccCheckDeliveryStreamDestroy(s *terraform.State) error {
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "aws_kinesis_firehose_delivery_stream" {
 			continue
@@ -1780,7 +1757,70 @@ resource "aws_lambda_function" "lambda_function_test" {
 `, funcName)
 }
 
-const testAccKinesisFirehoseDeliveryStreamBaseConfig = `
+func testAccLambdaBasicConfigNew(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_iam_role_policy" "iam_policy_for_lambda" {
+  name = %[1]q
+  role = aws_iam_role.iam_for_lambda.id
+
+  policy = <
Date: Wed, 17 Nov 2021 14:24:56 -0800
Subject: [PATCH 249/304] Update quicksight_user documentation example to include a valid example (#21819)

* change to session name

* appease linters
---
 website/docs/r/quicksight_user.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/quicksight_user.html.markdown b/website/docs/r/quicksight_user.html.markdown
index 117a4134e771..fbae1938bae8 100644
--- a/website/docs/r/quicksight_user.html.markdown
+++ b/website/docs/r/quicksight_user.html.markdown
@@ -14,7 +14,7 @@ Resource for managing QuickSight User
 
 ```terraform
 resource "aws_quicksight_user" "example" {
-  user_name     = "an-author"
+  session_name  = "an-author"
   email         = "author@example.com"
   identity_type = "IAM"
   user_role     = "AUTHOR"

From bbf54c997eef70d0b84b23fc097f249c89b79701 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Wed, 17 Nov 2021 14:55:21 -0800
Subject: [PATCH 250/304] Reverts `expandOverrideAction()` to private function

---
 internal/service/waf/flex.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/internal/service/waf/flex.go b/internal/service/waf/flex.go
index f6fae2f577e9..a97d86b6bbfb 100644
--- a/internal/service/waf/flex.go
+++ b/internal/service/waf/flex.go
@@ -17,7 +17,7 @@ func ExpandAction(l []interface{}) *waf.WafAction {
 	}
 }
 
-func ExpandOverrideAction(l []interface{}) *waf.WafOverrideAction {
+func expandOverrideAction(l []interface{}) *waf.WafOverrideAction {
 	if len(l) == 0 || l[0] == nil {
 		return nil
 	}
@@ -35,7 +35,7 @@ func ExpandWebACLUpdate(updateAction string, aclRule map[string]interface{}) *wa
 	switch aclRule["type"].(string) {
 	case waf.WafRuleTypeGroup:
 		rule = &waf.ActivatedRule{
-			OverrideAction: ExpandOverrideAction(aclRule["override_action"].([]interface{})),
+			OverrideAction: expandOverrideAction(aclRule["override_action"].([]interface{})),
 			Priority:       aws.Int64(int64(aclRule["priority"].(int))),
 			RuleId:         aws.String(aclRule["rule_id"].(string)),
 			Type:           aws.String(aclRule["type"].(string)),

From 32354f9fd659107fc356796397b6849b89f60bd4 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Wed, 17 Nov 2021 15:43:15 -0800
Subject: [PATCH 251/304] Fixes sweepers

---
 internal/service/docdb/sweep.go       |  7 ++++---
 internal/service/s3control/sweep.go   | 28 +++++++++++++++++++++------
 internal/service/wafregional/sweep.go |  3 ++-
 3 files changed, 28 insertions(+), 10
deletions(-) diff --git a/internal/service/docdb/sweep.go b/internal/service/docdb/sweep.go index 3e8b432f1963..5d2887e2c795 100644 --- a/internal/service/docdb/sweep.go +++ b/internal/service/docdb/sweep.go @@ -12,12 +12,13 @@ import ( "github.com/aws/aws-sdk-go/service/docdb" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" ) func init() { resource.AddTestSweepers("aws_docdb_global_cluster", &resource.Sweeper{ Name: "aws_docdb_global_cluster", - F: testSweepDocDBGlobalClusters, + F: sweepGlobalClusters, Dependencies: []string{ "aws_docdb_cluster", }, @@ -50,14 +51,14 @@ func sweepGlobalClusters(region string) error { continue } - if err := WaitForDocDBGlobalClusterDeletion(context.TODO(), conn, id); err != nil { + if err := WaitForGlobalClusterDeletion(context.TODO(), conn, id, GlobalClusterDeleteTimeout); err != nil { log.Printf("[ERROR] Failure while waiting for DocDB Global Cluster (%s) to be deleted: %s", id, err) } } return !lastPage }) - if testSweepSkipSweepError(err) { + if sweep.SkipSweepError(err) { log.Printf("[WARN] Skipping DocDB Global Cluster sweep for %s: %s", region, err) return nil } diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index c10f319fdf65..7572c24b134a 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3control" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/sweep" @@ -46,6 +47,7 @@ func sweepAccessPoints(region string) error { AccountId: aws.String(accountID), } sweepResources := make([]*sweep.SweepResource, 0) + var sweeperErrs *multierror.Error err = conn.ListAccessPointsPages(input, func(page *s3control.ListAccessPointsOutput, lastPage bool) bool { if page == nil { @@ -55,7 +57,13 @@ func sweepAccessPoints(region string) error { for _, accessPoint := range page.AccessPointList { r := ResourceAccessPoint() d := r.Data(nil) - d.SetId(AccessPointCreateResourceID(aws.StringValue(accessPoint.AccessPointArn), accountID, aws.StringValue(accessPoint.Name))) + id, err := AccessPointCreateResourceID(aws.StringValue(accessPoint.AccessPointArn)) + if err != nil { + sweeperErr := fmt.Errorf("error composing S3 Access Point ID (%s): %w", aws.StringValue(accessPoint.AccessPointArn), err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + } + d.SetId(id) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } @@ -65,20 +73,28 @@ func sweepAccessPoints(region string) error { if sweep.SkipSweepError(err) { log.Printf("[WARN] Skipping S3 Access Point sweep for %s: %s", region, err) - return nil + return sweeperErrs.ErrorOrNil() } if err != nil { - return fmt.Errorf("error listing SS3 Access Points (%s): %w", region, err) + sweeperErr := fmt.Errorf("error listing S3 Access Points (%s): %w", region, err) + if sweeperErrs.Len() > 0 { + return multierror.Append(sweeperErr, sweeperErrs) + } + return sweeperErr } err = sweep.SweepOrchestrator(sweepResources) if err != nil { - return fmt.Errorf("error sweeping S3 Access Points (%s): %w", region, err) + sweeperErr := fmt.Errorf("error sweeping S3 
Access Points (%s): %w", region, err) + if sweeperErrs.Len() > 0 { + return multierror.Append(sweeperErr, sweeperErrs) + } + return sweeperErr } - return nil + return sweeperErrs.ErrorOrNil() } func sweepMultiRegionAccessPoints(region string) error { @@ -151,7 +167,7 @@ func sweepObjectLambdaAccessPoints(region string) error { for _, accessPoint := range page.ObjectLambdaAccessPointList { r := ResourceObjectLambdaAccessPoint() d := r.Data(nil) - d.SetId(AccessPointCreateResourceID(aws.StringValue(accessPoint.ObjectLambdaAccessPointArn), accountID, aws.StringValue(accessPoint.Name))) + d.SetId(ObjectLambdaAccessPointCreateResourceID(accountID, aws.StringValue(accessPoint.Name))) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } diff --git a/internal/service/wafregional/sweep.go b/internal/service/wafregional/sweep.go index bf7ce278449d..cd10011f043f 100644 --- a/internal/service/wafregional/sweep.go +++ b/internal/service/wafregional/sweep.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfwaf "github.com/hashicorp/terraform-provider-aws/internal/service/waf" "github.com/hashicorp/terraform-provider-aws/internal/sweep" ) @@ -218,7 +219,7 @@ func sweepRuleGroups(region string) error { if err != nil { return err } - oldRules := FlattenWAFActivatedRules(rResp.ActivatedRules) + oldRules := tfwaf.FlattenActivatedRules(rResp.ActivatedRules) err = DeleteRuleGroup(*group.RuleGroupId, oldRules, conn, region) if err != nil { return err From cd8d68c328bc8eed615e7c6ee1a46ce8a0d455e8 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 19:00:13 -0500 Subject: [PATCH 252/304] Standardize naming --- internal/service/firehose/delivery_stream_data_source_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/firehose/delivery_stream_data_source_test.go b/internal/service/firehose/delivery_stream_data_source_test.go index 4d418519c1f5..bff727d5ad71 100644 --- a/internal/service/firehose/delivery_stream_data_source_test.go +++ b/internal/service/firehose/delivery_stream_data_source_test.go @@ -19,7 +19,7 @@ func TestAccFirehoseDeliveryStreamDataSource_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { Config: testAccDeliveryStreamBasicDataSourceConfig(rName), From cb91594ded989d3a1b0db6c9c231625c516752f9 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 19:00:47 -0500 Subject: [PATCH 253/304] Remove service from func names --- internal/service/firehose/delivery_stream.go | 90 ++++++++++---------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index bf404c7c3b82..81191d70485f 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -706,7 +706,7 @@ func flattenDynamicPartitioningConfiguration(dpc *firehose.DynamicPartitioningCo return dynamicPartitioningConfiguration } -func flattenFirehoseKinesisSourceConfiguration(desc *firehose.KinesisStreamSourceDescription) []interface{} { +func flattenSourceConfiguration(desc *firehose.KinesisStreamSourceDescription) []interface{} { if 
desc == nil { return []interface{}{} } @@ -719,7 +719,7 @@ func flattenFirehoseKinesisSourceConfiguration(desc *firehose.KinesisStreamSourc return []interface{}{mDesc} } -func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.DeliveryStreamDescription) error { +func flattenDeliveryStream(d *schema.ResourceData, s *firehose.DeliveryStreamDescription) error { d.Set("version_id", s.VersionId) d.Set("arn", s.DeliveryStreamARN) d.Set("name", s.DeliveryStreamName) @@ -745,7 +745,7 @@ func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.De } if s.Source != nil { - if err := d.Set("kinesis_source_configuration", flattenFirehoseKinesisSourceConfiguration(s.Source.KinesisStreamSourceDescription)); err != nil { + if err := d.Set("kinesis_source_configuration", flattenSourceConfiguration(s.Source.KinesisStreamSourceDescription)); err != nil { return fmt.Errorf("error setting kinesis_source_configuration: %s", err) } } @@ -780,7 +780,7 @@ func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.De } else if destination.HttpEndpointDestinationDescription != nil { d.Set("destination", firehoseDestinationTypeHttpEndpoint) configuredAccessKey := d.Get("http_endpoint_configuration.0.access_key").(string) - if err := d.Set("http_endpoint_configuration", flattenFirehoseHttpEndpointConfiguration(destination.HttpEndpointDestinationDescription, configuredAccessKey)); err != nil { + if err := d.Set("http_endpoint_configuration", flattenHTTPEndpointConfiguration(destination.HttpEndpointDestinationDescription, configuredAccessKey)); err != nil { return fmt.Errorf("error setting http_endpoint_configuration: %s", err) } if err := d.Set("s3_configuration", flattenFirehoseS3Configuration(destination.HttpEndpointDestinationDescription.S3DestinationDescription)); err != nil { @@ -803,7 +803,7 @@ func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.De return nil } -func flattenFirehoseHttpEndpointConfiguration(description *firehose.HttpEndpointDestinationDescription, configuredAccessKey string) []map[string]interface{} { +func flattenHTTPEndpointConfiguration(description *firehose.HttpEndpointDestinationDescription, configuredAccessKey string) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } @@ -1663,7 +1663,7 @@ func createExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat }, Prefix: extractPrefixConfiguration(s3), CompressionFormat: aws.String(s3["compression_format"].(string)), - DataFormatConversionConfiguration: expandFirehoseDataFormatConversionConfiguration(s3["data_format_conversion_configuration"].([]interface{})), + DataFormatConversionConfiguration: expandDataFormatConversionConfiguration(s3["data_format_conversion_configuration"].([]interface{})), EncryptionConfiguration: extractEncryptionConfiguration(s3), } @@ -1759,7 +1759,7 @@ func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat Prefix: extractPrefixConfiguration(s3), CompressionFormat: aws.String(s3["compression_format"].(string)), EncryptionConfiguration: extractEncryptionConfiguration(s3), - DataFormatConversionConfiguration: expandFirehoseDataFormatConversionConfiguration(s3["data_format_conversion_configuration"].([]interface{})), + DataFormatConversionConfiguration: expandDataFormatConversionConfiguration(s3["data_format_conversion_configuration"].([]interface{})), CloudWatchLoggingOptions: extractCloudWatchLoggingConfiguration(s3), ProcessingConfiguration: 
extractProcessingConfiguration(s3), } @@ -1788,7 +1788,7 @@ func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat return configuration } -func expandFirehoseDataFormatConversionConfiguration(l []interface{}) *firehose.DataFormatConversionConfiguration { +func expandDataFormatConversionConfiguration(l []interface{}) *firehose.DataFormatConversionConfiguration { if len(l) == 0 || l[0] == nil { // It is possible to just pass nil here, but this seems to be the // canonical form that AWS uses, and is less likely to produce diffs. @@ -1801,13 +1801,13 @@ func expandFirehoseDataFormatConversionConfiguration(l []interface{}) *firehose. return &firehose.DataFormatConversionConfiguration{ Enabled: aws.Bool(m["enabled"].(bool)), - InputFormatConfiguration: expandFirehoseInputFormatConfiguration(m["input_format_configuration"].([]interface{})), - OutputFormatConfiguration: expandFirehoseOutputFormatConfiguration(m["output_format_configuration"].([]interface{})), - SchemaConfiguration: expandFirehoseSchemaConfiguration(m["schema_configuration"].([]interface{})), + InputFormatConfiguration: expandInputFormatConfiguration(m["input_format_configuration"].([]interface{})), + OutputFormatConfiguration: expandOutputFormatConfiguration(m["output_format_configuration"].([]interface{})), + SchemaConfiguration: expandSchemaConfiguration(m["schema_configuration"].([]interface{})), } } -func expandFirehoseInputFormatConfiguration(l []interface{}) *firehose.InputFormatConfiguration { +func expandInputFormatConfiguration(l []interface{}) *firehose.InputFormatConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -1815,11 +1815,11 @@ func expandFirehoseInputFormatConfiguration(l []interface{}) *firehose.InputForm m := l[0].(map[string]interface{}) return &firehose.InputFormatConfiguration{ - Deserializer: expandFirehoseDeserializer(m["deserializer"].([]interface{})), + Deserializer: expandDeserializer(m["deserializer"].([]interface{})), } } -func expandFirehoseDeserializer(l []interface{}) *firehose.Deserializer { +func expandDeserializer(l []interface{}) *firehose.Deserializer { if len(l) == 0 || l[0] == nil { return nil } @@ -1827,12 +1827,12 @@ func expandFirehoseDeserializer(l []interface{}) *firehose.Deserializer { m := l[0].(map[string]interface{}) return &firehose.Deserializer{ - HiveJsonSerDe: expandFirehoseHiveJsonSerDe(m["hive_json_ser_de"].([]interface{})), - OpenXJsonSerDe: expandFirehoseOpenXJsonSerDe(m["open_x_json_ser_de"].([]interface{})), + HiveJsonSerDe: expandHiveJSONSerDe(m["hive_json_ser_de"].([]interface{})), + OpenXJsonSerDe: expandOpenXJSONSerDe(m["open_x_json_ser_de"].([]interface{})), } } -func expandFirehoseHiveJsonSerDe(l []interface{}) *firehose.HiveJsonSerDe { +func expandHiveJSONSerDe(l []interface{}) *firehose.HiveJsonSerDe { if len(l) == 0 { return nil } @@ -1848,7 +1848,7 @@ func expandFirehoseHiveJsonSerDe(l []interface{}) *firehose.HiveJsonSerDe { } } -func expandFirehoseOpenXJsonSerDe(l []interface{}) *firehose.OpenXJsonSerDe { +func expandOpenXJSONSerDe(l []interface{}) *firehose.OpenXJsonSerDe { if len(l) == 0 { return nil } @@ -1866,7 +1866,7 @@ func expandFirehoseOpenXJsonSerDe(l []interface{}) *firehose.OpenXJsonSerDe { } } -func expandFirehoseOutputFormatConfiguration(l []interface{}) *firehose.OutputFormatConfiguration { +func expandOutputFormatConfiguration(l []interface{}) *firehose.OutputFormatConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -1874,11 +1874,11 @@ func expandFirehoseOutputFormatConfiguration(l 
[]interface{}) *firehose.OutputFo m := l[0].(map[string]interface{}) return &firehose.OutputFormatConfiguration{ - Serializer: expandFirehoseSerializer(m["serializer"].([]interface{})), + Serializer: expandSerializer(m["serializer"].([]interface{})), } } -func expandFirehoseSerializer(l []interface{}) *firehose.Serializer { +func expandSerializer(l []interface{}) *firehose.Serializer { if len(l) == 0 || l[0] == nil { return nil } @@ -1886,12 +1886,12 @@ func expandFirehoseSerializer(l []interface{}) *firehose.Serializer { m := l[0].(map[string]interface{}) return &firehose.Serializer{ - OrcSerDe: expandFirehoseOrcSerDe(m["orc_ser_de"].([]interface{})), - ParquetSerDe: expandFirehoseParquetSerDe(m["parquet_ser_de"].([]interface{})), + OrcSerDe: expandOrcSerDe(m["orc_ser_de"].([]interface{})), + ParquetSerDe: expandParquetSerDe(m["parquet_ser_de"].([]interface{})), } } -func expandFirehoseOrcSerDe(l []interface{}) *firehose.OrcSerDe { +func expandOrcSerDe(l []interface{}) *firehose.OrcSerDe { if len(l) == 0 { return nil } @@ -1921,7 +1921,7 @@ func expandFirehoseOrcSerDe(l []interface{}) *firehose.OrcSerDe { return orcSerDe } -func expandFirehoseParquetSerDe(l []interface{}) *firehose.ParquetSerDe { +func expandParquetSerDe(l []interface{}) *firehose.ParquetSerDe { if len(l) == 0 { return nil } @@ -1942,7 +1942,7 @@ func expandFirehoseParquetSerDe(l []interface{}) *firehose.ParquetSerDe { } } -func expandFirehoseSchemaConfiguration(l []interface{}) *firehose.SchemaConfiguration { +func expandSchemaConfiguration(l []interface{}) *firehose.SchemaConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -2094,7 +2094,7 @@ func extractCloudWatchLoggingConfiguration(s3 map[string]interface{}) *firehose. } -func extractVpcConfiguration(es map[string]interface{}) *firehose.VpcConfiguration { +func extractVPCConfiguration(es map[string]interface{}) *firehose.VpcConfiguration { config := es["vpc_config"].([]interface{}) if len(config) == 0 { return nil @@ -2225,7 +2225,7 @@ func createElasticsearchConfig(d *schema.ResourceData, s3Config *firehose.S3Dest } if _, ok := es["vpc_config"]; ok { - config.VpcConfiguration = extractVpcConfiguration(es) + config.VpcConfiguration = extractVPCConfiguration(es) } return config, nil @@ -2336,7 +2336,7 @@ func updateSplunkConfig(d *schema.ResourceData, s3Update *firehose.S3Destination return configuration, nil } -func createHttpEndpointConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.HttpEndpointDestinationConfiguration, error) { +func createHTTPEndpointConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.HttpEndpointDestinationConfiguration, error) { HttpEndpointRaw, ok := d.GetOk("http_endpoint_configuration") if !ok { return nil, fmt.Errorf("Error loading HTTP Endpoint Configuration for Kinesis Firehose: http_endpoint_configuration not found") @@ -2346,12 +2346,12 @@ func createHttpEndpointConfig(d *schema.ResourceData, s3Config *firehose.S3Desti HttpEndpoint := sl[0].(map[string]interface{}) configuration := &firehose.HttpEndpointDestinationConfiguration{ - RetryOptions: extractHttpEndpointRetryOptions(HttpEndpoint), + RetryOptions: extractHTTPEndpointRetryOptions(HttpEndpoint), RoleARN: aws.String(HttpEndpoint["role_arn"].(string)), S3Configuration: s3Config, } - configuration.EndpointConfiguration = extractHttpEndpointConfiguration(HttpEndpoint) + configuration.EndpointConfiguration = extractHTTPEndpointConfiguration(HttpEndpoint) bufferingHints := 
&firehose.HttpEndpointBufferingHints{} @@ -2381,7 +2381,7 @@ func createHttpEndpointConfig(d *schema.ResourceData, s3Config *firehose.S3Desti return configuration, nil } -func updateHttpEndpointConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.HttpEndpointDestinationUpdate, error) { +func updateHTTPEndpointConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.HttpEndpointDestinationUpdate, error) { HttpEndpointRaw, ok := d.GetOk("http_endpoint_configuration") if !ok { return nil, fmt.Errorf("Error loading HTTP Endpoint Configuration for Kinesis Firehose: http_endpoint_configuration not found") @@ -2391,12 +2391,12 @@ func updateHttpEndpointConfig(d *schema.ResourceData, s3Update *firehose.S3Desti HttpEndpoint := sl[0].(map[string]interface{}) configuration := &firehose.HttpEndpointDestinationUpdate{ - RetryOptions: extractHttpEndpointRetryOptions(HttpEndpoint), + RetryOptions: extractHTTPEndpointRetryOptions(HttpEndpoint), RoleARN: aws.String(HttpEndpoint["role_arn"].(string)), S3Update: s3Update, } - configuration.EndpointConfiguration = extractHttpEndpointConfiguration(HttpEndpoint) + configuration.EndpointConfiguration = extractHTTPEndpointConfiguration(HttpEndpoint) bufferingHints := &firehose.HttpEndpointBufferingHints{} @@ -2463,7 +2463,7 @@ func extractRequestConfiguration(rc map[string]interface{}) *firehose.HttpEndpoi return RequestConfiguration } -func extractHttpEndpointConfiguration(ep map[string]interface{}) *firehose.HttpEndpointConfiguration { +func extractHTTPEndpointConfiguration(ep map[string]interface{}) *firehose.HttpEndpointConfiguration { endpointConfiguration := &firehose.HttpEndpointConfiguration{ Url: aws.String(ep["url"].(string)), } @@ -2502,7 +2502,7 @@ func extractElasticsearchRetryOptions(es map[string]interface{}) *firehose.Elast return retryOptions } -func extractHttpEndpointRetryOptions(tfMap map[string]interface{}) *firehose.HttpEndpointRetryOptions { +func extractHTTPEndpointRetryOptions(tfMap map[string]interface{}) *firehose.HttpEndpointRetryOptions { retryOptions := &firehose.HttpEndpointRetryOptions{} if retryDuration, ok := tfMap["retry_duration"].(int); ok { @@ -2598,7 +2598,7 @@ func resourceDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) erro } createInput.SplunkDestinationConfiguration = rc } else if d.Get("destination").(string) == firehoseDestinationTypeHttpEndpoint { - rc, err := createHttpEndpointConfig(d, s3Config) + rc, err := createHTTPEndpointConfig(d, s3Config) if err != nil { return err } @@ -2656,10 +2656,10 @@ func resourceDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) erro d.SetId(aws.StringValue(s.DeliveryStreamARN)) d.Set("arn", s.DeliveryStreamARN) - if v, ok := d.GetOk("server_side_encryption"); ok && !isKinesisFirehoseDeliveryStreamOptionDisabled(v) { + if v, ok := d.GetOk("server_side_encryption"); ok && !isDeliveryStreamOptionDisabled(v) { startInput := &firehose.StartDeliveryStreamEncryptionInput{ DeliveryStreamName: aws.String(sn), - DeliveryStreamEncryptionConfigurationInput: expandFirehoseDeliveryStreamEncryptionConfigurationInput(v.([]interface{})), + DeliveryStreamEncryptionConfigurationInput: expandDeliveryStreamEncryptionConfigurationInput(v.([]interface{})), } _, err := conn.StartDeliveryStreamEncryption(startInput) @@ -2750,7 +2750,7 @@ func resourceDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) erro } updateInput.SplunkDestinationUpdate = rc } else if d.Get("destination").(string) == 
firehoseDestinationTypeHttpEndpoint { - rc, err := updateHttpEndpointConfig(d, s3Config) + rc, err := updateHTTPEndpointConfig(d, s3Config) if err != nil { return err } @@ -2809,7 +2809,7 @@ func resourceDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) erro if d.HasChange("server_side_encryption") { _, n := d.GetChange("server_side_encryption") - if isKinesisFirehoseDeliveryStreamOptionDisabled(n) { + if isDeliveryStreamOptionDisabled(n) { _, err := conn.StopDeliveryStreamEncryption(&firehose.StopDeliveryStreamEncryptionInput{ DeliveryStreamName: aws.String(sn), }) @@ -2824,7 +2824,7 @@ func resourceDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) erro } else { startInput := &firehose.StartDeliveryStreamEncryptionInput{ DeliveryStreamName: aws.String(sn), - DeliveryStreamEncryptionConfigurationInput: expandFirehoseDeliveryStreamEncryptionConfigurationInput(n.([]interface{})), + DeliveryStreamEncryptionConfigurationInput: expandDeliveryStreamEncryptionConfigurationInput(n.([]interface{})), } _, err := conn.StartDeliveryStreamEncryption(startInput) @@ -2861,7 +2861,7 @@ func resourceDeliveryStreamRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error reading Kinesis Firehose Delivery Stream (%s): %w", sn, err) } - err = flattenKinesisFirehoseDeliveryStream(d, s) + err = flattenDeliveryStream(d, s) if err != nil { return err @@ -2913,7 +2913,7 @@ func resourceDeliveryStreamDelete(d *schema.ResourceData, meta interface{}) erro return nil } -func isKinesisFirehoseDeliveryStreamOptionDisabled(v interface{}) bool { +func isDeliveryStreamOptionDisabled(v interface{}) bool { options := v.([]interface{}) if len(options) == 0 || options[0] == nil { return true @@ -2929,7 +2929,7 @@ func isKinesisFirehoseDeliveryStreamOptionDisabled(v interface{}) bool { return !enabled } -func expandFirehoseDeliveryStreamEncryptionConfigurationInput(tfList []interface{}) *firehose.DeliveryStreamEncryptionConfigurationInput { +func expandDeliveryStreamEncryptionConfigurationInput(tfList []interface{}) *firehose.DeliveryStreamEncryptionConfigurationInput { if len(tfList) == 0 { return nil } From 49473d4a8b960f72ecda6181c93f01fc36d21fb3 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 19:01:02 -0500 Subject: [PATCH 254/304] Standardize tests --- .../service/firehose/delivery_stream_test.go | 764 ++++++++---------- 1 file changed, 350 insertions(+), 414 deletions(-) diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index 6fc2bb51ea72..5d6903a0c27b 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -20,15 +20,9 @@ import ( ) func TestAccFirehoseDeliveryStream_basic(t *testing.T) { - resourceName := "aws_kinesis_firehose_delivery_stream.test" - rInt := sdkacctest.RandInt() - var stream firehose.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - config := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, - rInt, rInt, rInt, rInt) + resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -37,7 +31,7 @@ func TestAccFirehoseDeliveryStream_basic(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_extendedS3basic(rName), Check: 
resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -66,15 +60,9 @@ func TestAccFirehoseDeliveryStream_basic(t *testing.T) { } func TestAccFirehoseDeliveryStream_disappears(t *testing.T) { - resourceName := "aws_kinesis_firehose_delivery_stream.test" - rInt := sdkacctest.RandInt() - var stream firehose.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - config := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, - rInt, rInt, rInt, rInt) + resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -83,7 +71,7 @@ func TestAccFirehoseDeliveryStream_disappears(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_extendedS3basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), acctest.CheckResourceDisappears(acctest.Provider, tffirehose.ResourceDeliveryStream(), resourceName), @@ -96,10 +84,8 @@ func TestAccFirehoseDeliveryStream_disappears(t *testing.T) { func TestAccFirehoseDeliveryStream_s3basic(t *testing.T) { var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - config := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, - ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -108,7 +94,7 @@ func TestAccFirehoseDeliveryStream_s3basic(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_s3basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -120,10 +106,7 @@ func TestAccFirehoseDeliveryStream_s3basic(t *testing.T) { func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() - rName := fmt.Sprintf("terraform-kinesis-firehose-basictest-%d", rInt) - config := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, - rInt, rInt, rInt, rInt) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -133,7 +116,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -143,7 +126,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { ), }, { - Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, 
nil, nil), @@ -152,11 +135,11 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { ), }, { - Config: config, + Config: testAccDeliveryStreamConfig_s3basic(rName), PlanOnly: true, }, { - Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -171,8 +154,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSE(t *testing.T) { func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() - rName := fmt.Sprintf("terraform-kinesis-firehose-basictest-%d", rInt) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -182,7 +164,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -193,7 +175,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { ), }, { - Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -202,7 +184,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { ), }, { - Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, rInt, true), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyArn(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -218,8 +200,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyARN(t *testing.T) { func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() - rName := fmt.Sprintf("terraform-kinesis-firehose-basictest-%d", rInt) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -229,7 +210,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, rInt, true, firehose.KeyTypeAwsOwnedCmk), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, true, firehose.KeyTypeAwsOwnedCmk), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -239,7 +220,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { ), }, { - Config: 
testAccDeliveryStreamConfig_s3basicWithSSE(rName, rInt, false), + Config: testAccDeliveryStreamConfig_s3basicWithSSE(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -248,7 +229,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { ), }, { - Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, rInt, true, firehose.KeyTypeAwsOwnedCmk), + Config: testAccDeliveryStreamConfig_s3basicWithSSEAndKeyType(rName, true, firehose.KeyTypeAwsOwnedCmk), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -263,10 +244,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithSSEAndKeyType(t *testing.T) { func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() - rName := fmt.Sprintf("terraform-kinesis-firehose-basictest-%d", rInt) - config := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, - rInt, rInt, rInt, rInt) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -276,7 +254,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_s3basicWithTags(rName, rInt), + Config: testAccDeliveryStreamConfig_s3basicWithTags(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -285,7 +263,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { ), }, { - Config: testAccDeliveryStreamConfig_s3basicWithTagsChanged(rName, rInt), + Config: testAccDeliveryStreamConfig_s3basicWithTagsChanged(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -294,7 +272,7 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { ), }, { - Config: config, + Config: testAccDeliveryStreamConfig_s3basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -307,10 +285,8 @@ func TestAccFirehoseDeliveryStream_s3basicWithTags(t *testing.T) { func TestAccFirehoseDeliveryStream_s3KinesisStreamSource(t *testing.T) { var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - config := fmt.Sprintf(testAccDeliveryStreamConfig_s3KinesisStreamSource, - ri, ri, ri, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -319,7 +295,7 @@ func TestAccFirehoseDeliveryStream_s3KinesisStreamSource(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_s3KinesisStreamSource(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, 
nil, nil), @@ -331,7 +307,7 @@ func TestAccFirehoseDeliveryStream_s3KinesisStreamSource(t *testing.T) { func TestAccFirehoseDeliveryStream_s3WithCloudWatchLogging(t *testing.T) { var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -341,7 +317,7 @@ func TestAccFirehoseDeliveryStream_s3WithCloudWatchLogging(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_s3WithCloudwatchLogging(ri), + Config: testAccDeliveryStreamConfig_s3WithCloudwatchLogging(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -353,14 +329,9 @@ func TestAccFirehoseDeliveryStream_s3WithCloudWatchLogging(t *testing.T) { func TestAccFirehoseDeliveryStream_s3Updates(t *testing.T) { var stream firehose.DeliveryStreamDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - ri := sdkacctest.RandInt() - preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_s3basic, - ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccDeliveryStreamConfig_s3Updates, - ri, ri, ri, ri) - updatedS3DestinationConfig := &firehose.S3DestinationDescription{ BufferingHints: &firehose.BufferingHints{ IntervalInSeconds: aws.Int64(400), @@ -375,14 +346,14 @@ func TestAccFirehoseDeliveryStream_s3Updates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccDeliveryStreamConfig_s3basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), ), }, { - Config: postConfig, + Config: testAccDeliveryStreamConfig_s3Updates(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil, nil, nil, nil, nil), @@ -393,14 +364,9 @@ func TestAccFirehoseDeliveryStream_s3Updates(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3basic(t *testing.T) { - resourceName := "aws_kinesis_firehose_delivery_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() - config := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, - ri, ri, ri, ri) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -409,7 +375,7 @@ func TestAccFirehoseDeliveryStream_extendedS3basic(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_extendedS3basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -428,7 +394,6 @@ func TestAccFirehoseDeliveryStream_extendedS3basic(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *testing.T) 
{ var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -439,7 +404,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, false), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -453,7 +418,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, true), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -462,7 +427,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes ), }, { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, rInt, false), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_Enabled(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -476,7 +441,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -487,7 +451,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -522,7 +486,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { t.Fatalf("Unable to update firehose destination: %s", err) } }, - Config: testAccDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_ExternalUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -536,7 +500,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_update(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_kinesis_firehose_delivery_stream.test" @@ -547,7 +510,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_up CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -563,7 +526,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_up ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -579,7 +542,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_up func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionHiveJSONSerDe_empty(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -590,7 +552,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionHiveJSONSerDe_e CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -611,7 +573,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionHiveJSONSerDe_e func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOpenXJSONSerDe_empty(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -622,7 +583,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOpenXJSONSerDe_ CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -643,7 +604,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOpenXJSONSerDe_ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOrcSerDe_empty(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_kinesis_firehose_delivery_stream.test" @@ -654,7 +614,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOrcSerDe_empty( CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -675,7 +635,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOrcSerDe_empty( func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionParquetSerDe_empty(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -686,7 +645,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionParquetSerDe_em CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -707,7 +666,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionParquetSerDe_em func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_update(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -718,7 +676,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_upda CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -734,7 +692,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_upda ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -750,7 +708,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_upda func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -761,7 +718,7 @@ func 
TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, rInt, "prefix1"), + Config: testAccDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, "prefix1"), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -774,7 +731,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, rInt, "prefix2"), + Config: testAccDeliveryStreamConfig_ExtendedS3_ErrorOutputPrefix(rName, "prefix2"), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -788,7 +745,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12600 func TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -799,7 +755,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ExtendedS3_ProcessingConfiguration_Empty(rName, rInt), + Config: testAccDeliveryStreamConfig_ExtendedS3_ProcessingConfiguration_Empty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.#", "1"), @@ -816,16 +772,10 @@ func TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { - resourceName := "aws_kinesis_firehose_delivery_stream.test" - var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() + resourceName := "aws_kinesis_firehose_delivery_stream.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - config := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3KmsKeyArn, - ri, ri, ri, ri, ri) - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), @@ -833,7 +783,7 @@ func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_extendedS3KmsKeyArn(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -850,16 +800,9 @@ func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { - //rString := sdkacctest.RandString(8) - //funcName := fmt.Sprintf("aws_kinesis_firehose_delivery_stream_test_%s", rString) - //policyName := fmt.Sprintf("tf_acc_policy_%s", rString) - //roleName := 
fmt.Sprintf("tf_acc_role_%s", rString) - resourceName := "aws_kinesis_firehose_delivery_stream.test" - var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -868,7 +811,7 @@ func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_extendedS3DynamicPartitioning(rName, ri), + Config: testAccDeliveryStreamConfig_extendedS3DynamicPartitioning(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -886,21 +829,9 @@ func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { } func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { - resourceName := "aws_kinesis_firehose_delivery_stream.test" - var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - preConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3basic, - ri, ri, ri, ri) - firstUpdateConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3Updates_Initial, - ri, ri, ri, ri) - removeProcessorsConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3Updates_RemoveProcessors, - ri, ri, ri, ri) + resourceName := "aws_kinesis_firehose_delivery_stream.test" firstUpdateExtendedS3DestinationConfig := &firehose.ExtendedS3DestinationDescription{ BufferingHints: &firehose.BufferingHints{ @@ -943,7 +874,7 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccDeliveryStreamConfig_extendedS3basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -955,14 +886,14 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { ImportStateVerify: true, }, { - Config: firstUpdateConfig, + Config: testAccDeliveryStreamConfig_extendedS3Updates_Initial(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, firstUpdateExtendedS3DestinationConfig, nil, nil, nil, nil), ), }, { - Config: removeProcessorsConfig, + Config: testAccDeliveryStreamConfig_extendedS3Updates_RemoveProcessors(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, removeProcessorsExtendedS3DestinationConfig, nil, nil, nil, nil), @@ -974,10 +905,8 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3_kinesisStreamSource(t *testing.T) { var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - config := 
fmt.Sprintf(testAccDeliveryStreamConfig_extendedS3_KinesisStreamSource, - ri, ri, ri, ri, ri, ri, ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -986,7 +915,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_kinesisStreamSource(t *testing.T) CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccDeliveryStreamConfig_extendedS3_KinesisStreamSource(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1003,7 +932,6 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_kinesisStreamSource(t *testing.T) func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -1035,7 +963,7 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamRedshiftConfig(rName, rInt), + Config: testAccDeliveryStreamRedshiftConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1048,7 +976,7 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { ImportStateVerifyIgnore: []string{"redshift_configuration.0.password"}, }, { - Config: testAccDeliveryStreamRedshiftConfigUpdates(rName, rInt), + Config: testAccDeliveryStreamRedshiftConfigUpdates(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, updatedRedshiftConfig, nil, nil, nil), @@ -1060,16 +988,8 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { var stream firehose.DeliveryStreamDescription - - ri := sdkacctest.RandInt() - resourceName := "aws_kinesis_firehose_delivery_stream.test" - - preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_SplunkBasic, - ri, ri, ri, ri) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - postConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_SplunkUpdates, - ri, ri, ri, ri) + resourceName := "aws_kinesis_firehose_delivery_stream.test" updatedSplunkConfig := &firehose.SplunkDestinationDescription{ HECEndpointType: aws.String("Event"), @@ -1098,7 +1018,7 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccDeliveryStreamConfig_SplunkBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1110,7 +1030,7 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccDeliveryStreamConfig_SplunkUpdates(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, updatedSplunkConfig, nil), @@ -1122,17 +1042,8 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t 
*testing.T) { func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { var stream firehose.DeliveryStreamDescription - - ri := sdkacctest.RandInt() - resourceName := "aws_kinesis_firehose_delivery_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_HTTPEndpointBasic, - ri, ri, ri, ri) - postConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_HTTPEndpointUpdates, - ri, ri, ri, ri) + resourceName := "aws_kinesis_firehose_delivery_stream.test" updatedHTTPEndpointConfig := &firehose.HttpEndpointDestinationDescription{ EndpointConfiguration: &firehose.HttpEndpointDescription{ @@ -1163,7 +1074,7 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccDeliveryStreamConfig_HTTPEndpointBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1175,7 +1086,7 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccDeliveryStreamConfig_HTTPEndpointUpdates(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, updatedHTTPEndpointConfig), @@ -1187,7 +1098,7 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { var stream firehose.DeliveryStreamDescription - rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -1197,7 +1108,7 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rInt, 301), + Config: testAccDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rName, 301), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), ), @@ -1208,7 +1119,7 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rInt, 302), + Config: testAccDeliveryStreamConfig_HTTPEndpoint_RetryDuration(rName, 302), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), ), @@ -1219,16 +1130,8 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { var stream firehose.DeliveryStreamDescription - - resourceName := "aws_kinesis_firehose_delivery_stream.test" - ri := sdkacctest.RandInt() - - preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchBasic, - ri, ri, ri, ri, ri, ri) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - postConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchUpdate, - ri, ri, ri, ri, ri, ri) + resourceName := "aws_kinesis_firehose_delivery_stream.test" updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ BufferingHints: 
&firehose.ElasticsearchBufferingHints{ @@ -1257,7 +1160,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccDeliveryStreamConfig_ElasticsearchBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1269,7 +1172,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccDeliveryStreamConfig_ElasticsearchUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), @@ -1281,16 +1184,8 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { var stream firehose.DeliveryStreamDescription - - resourceName := "aws_kinesis_firehose_delivery_stream.test" - ri := sdkacctest.RandInt() - - preConfig := fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchEndpoint, - ri, ri, ri, ri, ri, ri) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - postConfig := testAccLambdaBasicConfigNew(rName) + - fmt.Sprintf(testAccDeliveryStreamConfig_ElasticsearchEndpointUpdate, - ri, ri, ri, ri, ri, ri) + resourceName := "aws_kinesis_firehose_delivery_stream.test" updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ BufferingHints: &firehose.ElasticsearchBufferingHints{ @@ -1319,7 +1214,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccDeliveryStreamConfig_ElasticsearchEndpoint(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1331,7 +1226,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccDeliveryStreamConfig_ElasticsearchEndpointUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), @@ -1345,11 +1240,8 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { // when the Kinesis Firehose delivery stream has a VPC Configuration. 
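// For orientation, a rough sketch of the vpc_config block these VPC tests
// exercise (attribute names per the provider docs; resource labels and
// values illustrative, not taken from this patch):
//
//   elasticsearch_configuration {
//     vpc_config {
//       subnet_ids         = [aws_subnet.test.id]
//       security_group_ids = [aws_security_group.test.id]
//       role_arn           = aws_iam_role.firehose.arn
//     }
//   }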
func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { var stream firehose.DeliveryStreamDescription - - resourceName := "aws_kinesis_firehose_delivery_stream.test" - ri := sdkacctest.RandInt() - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_kinesis_firehose_delivery_stream.test" updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ BufferingHints: &firehose.ElasticsearchBufferingHints{ @@ -1378,7 +1270,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ElasticsearchVpcBasic(ri), + Config: testAccDeliveryStreamConfig_ElasticsearchVpcBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1394,7 +1286,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_ElasticsearchVpcUpdate(rName, ri), + Config: testAccDeliveryStreamConfig_ElasticsearchVpcUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), @@ -1411,7 +1303,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { // Regression test for https://github.com/hashicorp/terraform-provider-aws/issues/1657 func TestAccFirehoseDeliveryStream_missingProcessing(t *testing.T) { var stream firehose.DeliveryStreamDescription - ri := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ @@ -1421,7 +1313,7 @@ func TestAccFirehoseDeliveryStream_missingProcessing(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_missingProcessingConfiguration(ri), + Config: testAccDeliveryStreamConfig_missingProcessingConfiguration(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1639,7 +1531,7 @@ func testAccCheckDeliveryStreamDestroy_ExtendedS3(s *terraform.State) error { err := testAccCheckDeliveryStreamDestroy(s) if err == nil { - err = testAccCheckFirehoseLambdaFunctionDestroy(s) + err = testAccCheckLambdaFunctionDestroy(s) } return err @@ -1670,7 +1562,7 @@ func testAccCheckDeliveryStreamDestroy(s *terraform.State) error { return nil } -func testAccCheckFirehoseLambdaFunctionDestroy(s *terraform.State) error { +func testAccCheckLambdaFunctionDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn for _, rs := range s.RootModule().Resources { @@ -1690,10 +1582,10 @@ func testAccCheckFirehoseLambdaFunctionDestroy(s *terraform.State) error { return nil } -func testAccLambdaConfig(policyName, roleName string) string { +func testAccLambdaBasicConfig(rName string) string { return fmt.Sprintf(` resource "aws_iam_role_policy" "iam_policy_for_lambda" { - name = "%s" + name = "%[1]s-lambda" role = aws_iam_role.iam_for_lambda.id policy = < Date: Wed, 17 Nov 2021 19:02:34 -0500 Subject: [PATCH 255/304] Fix test formatting --- 
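Note: purely cosmetic; the hunk below only re-aligns the "=" signs in the
embedded HCL, in the usual terrafmt style, e.g. (illustrative):

    role_arn            = aws_iam_role.firehose.arn
    bucket_arn          = aws_s3_bucket.bucket.arn
    error_output_prefix = "prefix1"
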
internal/service/firehose/delivery_stream_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index 5d6903a0c27b..8c6f506f8efc 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -2559,11 +2559,11 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "extended_s3" extended_s3_configuration { - role_arn = aws_iam_role.firehose.arn - bucket_arn = aws_s3_bucket.bucket.arn - prefix = "custom-prefix/customerId=!{partitionKeyFromLambda:customerId}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/" + role_arn = aws_iam_role.firehose.arn + bucket_arn = aws_s3_bucket.bucket.arn + prefix = "custom-prefix/customerId=!{partitionKeyFromLambda:customerId}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/" error_output_prefix = "prefix1" - buffer_size = 64 + buffer_size = 64 dynamic_partitioning_configuration { enabled = true From d2fc750fcea2aa06d7e14fa4c8f48a89616ca0a9 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 19:21:12 -0500 Subject: [PATCH 256/304] Remove service from func names --- internal/service/firehose/delivery_stream.go | 140 +++++++++---------- 1 file changed, 68 insertions(+), 72 deletions(-) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 81191d70485f..d8770dd5d83c 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -22,12 +22,12 @@ import ( ) const ( - firehoseDestinationTypeS3 = "s3" - firehoseDestinationTypeExtendedS3 = "extended_s3" - firehoseDestinationTypeElasticsearch = "elasticsearch" - firehoseDestinationTypeRedshift = "redshift" - firehoseDestinationTypeSplunk = "splunk" - firehoseDestinationTypeHttpEndpoint = "http_endpoint" + destinationTypeS3 = "s3" + destinationTypeExtendedS3 = "extended_s3" + destinationTypeElasticsearch = "elasticsearch" + destinationTypeRedshift = "redshift" + destinationTypeSplunk = "splunk" + destinationTypeHttpEndpoint = "http_endpoint" ) func cloudWatchLoggingOptionsSchema() *schema.Schema { @@ -237,7 +237,7 @@ func flattenCloudwatchLoggingOptions(clo *firehose.CloudWatchLoggingOptions) []i return []interface{}{cloudwatchLoggingOptions} } -func flattenFirehoseElasticsearchConfiguration(description *firehose.ElasticsearchDestinationDescription) []map[string]interface{} { +func flattenElasticsearchConfiguration(description *firehose.ElasticsearchDestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } @@ -288,7 +288,7 @@ func flattenVpcConfiguration(description *firehose.VpcConfigurationDescription) return []map[string]interface{}{m} } -func flattenFirehoseExtendedS3Configuration(description *firehose.ExtendedS3DestinationDescription) []map[string]interface{} { +func flattenExtendedS3Configuration(description *firehose.ExtendedS3DestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } @@ -297,13 +297,13 @@ func flattenFirehoseExtendedS3Configuration(description *firehose.ExtendedS3Dest "bucket_arn": aws.StringValue(description.BucketARN), "cloudwatch_logging_options": flattenCloudwatchLoggingOptions(description.CloudWatchLoggingOptions), "compression_format": aws.StringValue(description.CompressionFormat), - 
"data_format_conversion_configuration": flattenFirehoseDataFormatConversionConfiguration(description.DataFormatConversionConfiguration), + "data_format_conversion_configuration": flattenDataFormatConversionConfiguration(description.DataFormatConversionConfiguration), "error_output_prefix": aws.StringValue(description.ErrorOutputPrefix), "prefix": aws.StringValue(description.Prefix), "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), "dynamic_partitioning_configuration": flattenDynamicPartitioningConfiguration(description.DynamicPartitioningConfiguration), "role_arn": aws.StringValue(description.RoleARN), - "s3_backup_configuration": flattenFirehoseS3Configuration(description.S3BackupDescription), + "s3_backup_configuration": flattenS3Configuration(description.S3BackupDescription), "s3_backup_mode": aws.StringValue(description.S3BackupMode), } @@ -319,7 +319,7 @@ func flattenFirehoseExtendedS3Configuration(description *firehose.ExtendedS3Dest return []map[string]interface{}{m} } -func flattenFirehoseRedshiftConfiguration(description *firehose.RedshiftDestinationDescription, configuredPassword string) []map[string]interface{} { +func flattenRedshiftConfiguration(description *firehose.RedshiftDestinationDescription, configuredPassword string) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } @@ -330,7 +330,7 @@ func flattenFirehoseRedshiftConfiguration(description *firehose.RedshiftDestinat "password": configuredPassword, "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), "role_arn": aws.StringValue(description.RoleARN), - "s3_backup_configuration": flattenFirehoseS3Configuration(description.S3BackupDescription), + "s3_backup_configuration": flattenS3Configuration(description.S3BackupDescription), "s3_backup_mode": aws.StringValue(description.S3BackupMode), "username": aws.StringValue(description.Username), } @@ -348,7 +348,7 @@ func flattenFirehoseRedshiftConfiguration(description *firehose.RedshiftDestinat return []map[string]interface{}{m} } -func flattenFirehoseSplunkConfiguration(description *firehose.SplunkDestinationDescription) []map[string]interface{} { +func flattenSplunkConfiguration(description *firehose.SplunkDestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } @@ -369,7 +369,7 @@ func flattenFirehoseSplunkConfiguration(description *firehose.SplunkDestinationD return []map[string]interface{}{m} } -func flattenFirehoseS3Configuration(description *firehose.S3DestinationDescription) []map[string]interface{} { +func flattenS3Configuration(description *firehose.S3DestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } @@ -394,15 +394,15 @@ func flattenFirehoseS3Configuration(description *firehose.S3DestinationDescripti return []map[string]interface{}{m} } -func flattenFirehoseDataFormatConversionConfiguration(dfcc *firehose.DataFormatConversionConfiguration) []map[string]interface{} { +func flattenDataFormatConversionConfiguration(dfcc *firehose.DataFormatConversionConfiguration) []map[string]interface{} { if dfcc == nil { return []map[string]interface{}{} } enabled := aws.BoolValue(dfcc.Enabled) - ifc := flattenFirehoseInputFormatConfiguration(dfcc.InputFormatConfiguration) - ofc := flattenFirehoseOutputFormatConfiguration(dfcc.OutputFormatConfiguration) - sc := 
flattenFirehoseSchemaConfiguration(dfcc.SchemaConfiguration) + ifc := flattenInputFormatConfiguration(dfcc.InputFormatConfiguration) + ofc := flattenOutputFormatConfiguration(dfcc.OutputFormatConfiguration) + sc := flattenSchemaConfiguration(dfcc.SchemaConfiguration) // The AWS SDK can represent "no data format conversion configuration" in two ways: // 1. With a nil value @@ -423,32 +423,32 @@ func flattenFirehoseDataFormatConversionConfiguration(dfcc *firehose.DataFormatC return []map[string]interface{}{m} } -func flattenFirehoseInputFormatConfiguration(ifc *firehose.InputFormatConfiguration) []map[string]interface{} { +func flattenInputFormatConfiguration(ifc *firehose.InputFormatConfiguration) []map[string]interface{} { if ifc == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "deserializer": flattenFirehoseDeserializer(ifc.Deserializer), + "deserializer": flattenDeserializer(ifc.Deserializer), } return []map[string]interface{}{m} } -func flattenFirehoseDeserializer(deserializer *firehose.Deserializer) []map[string]interface{} { +func flattenDeserializer(deserializer *firehose.Deserializer) []map[string]interface{} { if deserializer == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "hive_json_ser_de": flattenFirehoseHiveJsonSerDe(deserializer.HiveJsonSerDe), - "open_x_json_ser_de": flattenFirehoseOpenXJsonSerDe(deserializer.OpenXJsonSerDe), + "hive_json_ser_de": flattenHiveJSONSerDe(deserializer.HiveJsonSerDe), + "open_x_json_ser_de": flattenOpenXJSONSerDe(deserializer.OpenXJsonSerDe), } return []map[string]interface{}{m} } -func flattenFirehoseHiveJsonSerDe(hjsd *firehose.HiveJsonSerDe) []map[string]interface{} { +func flattenHiveJSONSerDe(hjsd *firehose.HiveJsonSerDe) []map[string]interface{} { if hjsd == nil { return []map[string]interface{}{} } @@ -460,7 +460,7 @@ func flattenFirehoseHiveJsonSerDe(hjsd *firehose.HiveJsonSerDe) []map[string]int return []map[string]interface{}{m} } -func flattenFirehoseOpenXJsonSerDe(oxjsd *firehose.OpenXJsonSerDe) []map[string]interface{} { +func flattenOpenXJSONSerDe(oxjsd *firehose.OpenXJsonSerDe) []map[string]interface{} { if oxjsd == nil { return []map[string]interface{}{} } @@ -481,32 +481,32 @@ func flattenFirehoseOpenXJsonSerDe(oxjsd *firehose.OpenXJsonSerDe) []map[string] return []map[string]interface{}{m} } -func flattenFirehoseOutputFormatConfiguration(ofc *firehose.OutputFormatConfiguration) []map[string]interface{} { +func flattenOutputFormatConfiguration(ofc *firehose.OutputFormatConfiguration) []map[string]interface{} { if ofc == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "serializer": flattenFirehoseSerializer(ofc.Serializer), + "serializer": flattenSerializer(ofc.Serializer), } return []map[string]interface{}{m} } -func flattenFirehoseSerializer(serializer *firehose.Serializer) []map[string]interface{} { +func flattenSerializer(serializer *firehose.Serializer) []map[string]interface{} { if serializer == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "orc_ser_de": flattenFirehoseOrcSerDe(serializer.OrcSerDe), - "parquet_ser_de": flattenFirehoseParquetSerDe(serializer.ParquetSerDe), + "orc_ser_de": flattenOrcSerDe(serializer.OrcSerDe), + "parquet_ser_de": flattenParquetSerDe(serializer.ParquetSerDe), } return []map[string]interface{}{m} } -func flattenFirehoseOrcSerDe(osd *firehose.OrcSerDe) []map[string]interface{} { +func flattenOrcSerDe(osd *firehose.OrcSerDe) []map[string]interface{} { if osd == nil { return 
[]map[string]interface{}{} } @@ -558,7 +558,7 @@ func flattenFirehoseOrcSerDe(osd *firehose.OrcSerDe) []map[string]interface{} { return []map[string]interface{}{m} } -func flattenFirehoseParquetSerDe(psd *firehose.ParquetSerDe) []map[string]interface{} { +func flattenParquetSerDe(psd *firehose.ParquetSerDe) []map[string]interface{} { if psd == nil { return []map[string]interface{}{} } @@ -594,7 +594,7 @@ func flattenFirehoseParquetSerDe(psd *firehose.ParquetSerDe) []map[string]interf return []map[string]interface{}{m} } -func flattenFirehoseSchemaConfiguration(sc *firehose.SchemaConfiguration) []map[string]interface{} { +func flattenSchemaConfiguration(sc *firehose.SchemaConfiguration) []map[string]interface{} { if sc == nil { return []map[string]interface{}{} } @@ -753,47 +753,47 @@ func flattenDeliveryStream(d *schema.ResourceData, s *firehose.DeliveryStreamDes if len(s.Destinations) > 0 { destination := s.Destinations[0] if destination.RedshiftDestinationDescription != nil { - d.Set("destination", firehoseDestinationTypeRedshift) + d.Set("destination", destinationTypeRedshift) configuredPassword := d.Get("redshift_configuration.0.password").(string) - if err := d.Set("redshift_configuration", flattenFirehoseRedshiftConfiguration(destination.RedshiftDestinationDescription, configuredPassword)); err != nil { + if err := d.Set("redshift_configuration", flattenRedshiftConfiguration(destination.RedshiftDestinationDescription, configuredPassword)); err != nil { return fmt.Errorf("error setting redshift_configuration: %s", err) } - if err := d.Set("s3_configuration", flattenFirehoseS3Configuration(destination.RedshiftDestinationDescription.S3DestinationDescription)); err != nil { + if err := d.Set("s3_configuration", flattenS3Configuration(destination.RedshiftDestinationDescription.S3DestinationDescription)); err != nil { return fmt.Errorf("error setting s3_configuration: %s", err) } } else if destination.ElasticsearchDestinationDescription != nil { - d.Set("destination", firehoseDestinationTypeElasticsearch) - if err := d.Set("elasticsearch_configuration", flattenFirehoseElasticsearchConfiguration(destination.ElasticsearchDestinationDescription)); err != nil { + d.Set("destination", destinationTypeElasticsearch) + if err := d.Set("elasticsearch_configuration", flattenElasticsearchConfiguration(destination.ElasticsearchDestinationDescription)); err != nil { return fmt.Errorf("error setting elasticsearch_configuration: %s", err) } - if err := d.Set("s3_configuration", flattenFirehoseS3Configuration(destination.ElasticsearchDestinationDescription.S3DestinationDescription)); err != nil { + if err := d.Set("s3_configuration", flattenS3Configuration(destination.ElasticsearchDestinationDescription.S3DestinationDescription)); err != nil { return fmt.Errorf("error setting s3_configuration: %s", err) } } else if destination.SplunkDestinationDescription != nil { - d.Set("destination", firehoseDestinationTypeSplunk) - if err := d.Set("splunk_configuration", flattenFirehoseSplunkConfiguration(destination.SplunkDestinationDescription)); err != nil { + d.Set("destination", destinationTypeSplunk) + if err := d.Set("splunk_configuration", flattenSplunkConfiguration(destination.SplunkDestinationDescription)); err != nil { return fmt.Errorf("error setting splunk_configuration: %s", err) } - if err := d.Set("s3_configuration", flattenFirehoseS3Configuration(destination.SplunkDestinationDescription.S3DestinationDescription)); err != nil { + if err := d.Set("s3_configuration", 
flattenS3Configuration(destination.SplunkDestinationDescription.S3DestinationDescription)); err != nil { return fmt.Errorf("error setting s3_configuration: %s", err) } } else if destination.HttpEndpointDestinationDescription != nil { - d.Set("destination", firehoseDestinationTypeHttpEndpoint) + d.Set("destination", destinationTypeHttpEndpoint) configuredAccessKey := d.Get("http_endpoint_configuration.0.access_key").(string) if err := d.Set("http_endpoint_configuration", flattenHTTPEndpointConfiguration(destination.HttpEndpointDestinationDescription, configuredAccessKey)); err != nil { return fmt.Errorf("error setting http_endpoint_configuration: %s", err) } - if err := d.Set("s3_configuration", flattenFirehoseS3Configuration(destination.HttpEndpointDestinationDescription.S3DestinationDescription)); err != nil { + if err := d.Set("s3_configuration", flattenS3Configuration(destination.HttpEndpointDestinationDescription.S3DestinationDescription)); err != nil { return fmt.Errorf("error setting s3_configuration: %s", err) } - } else if d.Get("destination").(string) == firehoseDestinationTypeS3 { - d.Set("destination", firehoseDestinationTypeS3) - if err := d.Set("s3_configuration", flattenFirehoseS3Configuration(destination.S3DestinationDescription)); err != nil { + } else if d.Get("destination").(string) == destinationTypeS3 { + d.Set("destination", destinationTypeS3) + if err := d.Set("s3_configuration", flattenS3Configuration(destination.S3DestinationDescription)); err != nil { return fmt.Errorf("error setting s3_configuration: %s", err) } } else { - d.Set("destination", firehoseDestinationTypeExtendedS3) - if err := d.Set("extended_s3_configuration", flattenFirehoseExtendedS3Configuration(destination.ExtendedS3DestinationDescription)); err != nil { + d.Set("destination", destinationTypeExtendedS3) + if err := d.Set("extended_s3_configuration", flattenExtendedS3Configuration(destination.ExtendedS3DestinationDescription)); err != nil { return fmt.Errorf("error setting extended_s3_configuration: %s", err) } } @@ -936,12 +936,12 @@ func ResourceDeliveryStream() *schema.Resource { return strings.ToLower(value) }, ValidateFunc: validation.StringInSlice([]string{ - firehoseDestinationTypeS3, - firehoseDestinationTypeExtendedS3, - firehoseDestinationTypeRedshift, - firehoseDestinationTypeElasticsearch, - firehoseDestinationTypeSplunk, - firehoseDestinationTypeHttpEndpoint, + destinationTypeS3, + destinationTypeExtendedS3, + destinationTypeRedshift, + destinationTypeElasticsearch, + destinationTypeSplunk, + destinationTypeHttpEndpoint, }, false), }, @@ -1764,10 +1764,6 @@ func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat ProcessingConfiguration: extractProcessingConfiguration(s3), } - if _, ok := s3["dynamic_partitioning_configuration"]; ok { - configuration.DynamicPartitioningConfiguration = extractDynamicPartitioningConfiguration(s3) - } - if _, ok := s3["cloudwatch_logging_options"]; ok { configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) } @@ -2571,33 +2567,33 @@ func resourceDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) erro createInput.DeliveryStreamType = aws.String(firehose.DeliveryStreamTypeDirectPut) } - if d.Get("destination").(string) == firehoseDestinationTypeExtendedS3 { + if d.Get("destination").(string) == destinationTypeExtendedS3 { extendedS3Config := createExtendedS3Config(d) createInput.ExtendedS3DestinationConfiguration = extendedS3Config } else { s3Config := createS3Config(d) - if 
d.Get("destination").(string) == firehoseDestinationTypeS3 { + if d.Get("destination").(string) == destinationTypeS3 { createInput.S3DestinationConfiguration = s3Config - } else if d.Get("destination").(string) == firehoseDestinationTypeElasticsearch { + } else if d.Get("destination").(string) == destinationTypeElasticsearch { esConfig, err := createElasticsearchConfig(d, s3Config) if err != nil { return err } createInput.ElasticsearchDestinationConfiguration = esConfig - } else if d.Get("destination").(string) == firehoseDestinationTypeRedshift { + } else if d.Get("destination").(string) == destinationTypeRedshift { rc, err := createRedshiftConfig(d, s3Config) if err != nil { return err } createInput.RedshiftDestinationConfiguration = rc - } else if d.Get("destination").(string) == firehoseDestinationTypeSplunk { + } else if d.Get("destination").(string) == destinationTypeSplunk { rc, err := createSplunkConfig(d, s3Config) if err != nil { return err } createInput.SplunkDestinationConfiguration = rc - } else if d.Get("destination").(string) == firehoseDestinationTypeHttpEndpoint { + } else if d.Get("destination").(string) == destinationTypeHttpEndpoint { rc, err := createHTTPEndpointConfig(d, s3Config) if err != nil { return err @@ -2681,7 +2677,7 @@ func validSchema(d *schema.ResourceData) error { _, s3Exists := d.GetOk("s3_configuration") _, extendedS3Exists := d.GetOk("extended_s3_configuration") - if d.Get("destination").(string) == firehoseDestinationTypeExtendedS3 { + if d.Get("destination").(string) == destinationTypeExtendedS3 { if !extendedS3Exists { return fmt.Errorf( "When destination is 'extended_s3', extended_s3_configuration is required", @@ -2723,33 +2719,33 @@ func resourceDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) erro DestinationId: aws.String(d.Get("destination_id").(string)), } - if d.Get("destination").(string) == firehoseDestinationTypeExtendedS3 { + if d.Get("destination").(string) == destinationTypeExtendedS3 { extendedS3Config := updateExtendedS3Config(d) updateInput.ExtendedS3DestinationUpdate = extendedS3Config } else { s3Config := updateS3Config(d) - if d.Get("destination").(string) == firehoseDestinationTypeS3 { + if d.Get("destination").(string) == destinationTypeS3 { updateInput.S3DestinationUpdate = s3Config - } else if d.Get("destination").(string) == firehoseDestinationTypeElasticsearch { + } else if d.Get("destination").(string) == destinationTypeElasticsearch { esUpdate, err := updateElasticsearchConfig(d, s3Config) if err != nil { return err } updateInput.ElasticsearchDestinationUpdate = esUpdate - } else if d.Get("destination").(string) == firehoseDestinationTypeRedshift { + } else if d.Get("destination").(string) == destinationTypeRedshift { rc, err := updateRedshiftConfig(d, s3Config) if err != nil { return err } updateInput.RedshiftDestinationUpdate = rc - } else if d.Get("destination").(string) == firehoseDestinationTypeSplunk { + } else if d.Get("destination").(string) == destinationTypeSplunk { rc, err := updateSplunkConfig(d, s3Config) if err != nil { return err } updateInput.SplunkDestinationUpdate = rc - } else if d.Get("destination").(string) == firehoseDestinationTypeHttpEndpoint { + } else if d.Get("destination").(string) == destinationTypeHttpEndpoint { rc, err := updateHTTPEndpointConfig(d, s3Config) if err != nil { return err From 81d03193b01b533039646d54d468742ab04b025f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 19:21:51 -0500 Subject: [PATCH 257/304] Uniquify role names --- 
.../service/firehose/delivery_stream_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index 8c6f506f8efc..3dd66a3e199c 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -1270,7 +1270,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { CheckDestroy: testAccCheckDeliveryStreamDestroy, Steps: []resource.TestStep{ { - Config: testAccDeliveryStreamConfig_ElasticsearchVpcBasic(rName), + Config: testAccDeliveryStreamConfig_ElasticsearchVPCBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil), @@ -1286,7 +1286,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccDeliveryStreamConfig_ElasticsearchVpcUpdate(rName), + Config: testAccDeliveryStreamConfig_ElasticsearchVPCUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticsearchConfig, nil, nil), @@ -3007,7 +3007,7 @@ EOF } // Elasticsearch associated with VPC -func testAccDeliveryStreamBaseElasticsearchVpcConfig(rName string) string { +func testAccDeliveryStreamBaseElasticsearchVPCConfig(rName string) string { return acctest.ConfigCompose( testAccDeliveryStreamBaseConfig(rName), fmt.Sprintf(` @@ -3077,7 +3077,7 @@ resource "aws_elasticsearch_domain" "test_cluster" { } resource "aws_iam_role_policy" "firehose-elasticsearch" { - name = %[1]q + name = "%[1]s-es" role = aws_iam_role.firehose.id policy = < Date: Wed, 17 Nov 2021 16:43:23 -0800 Subject: [PATCH 258/304] Corrects `include` and removes `exclude` declarations from `prefer-aws-go-sdk-pointer-conversion-conditional` --- .semgrep.yml | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/.semgrep.yml b/.semgrep.yml index b5ca7b36efbc..1c6bc7acafcb 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -73,32 +73,8 @@ rules: languages: [go] message: Prefer AWS Go SDK pointer conversion functions for dereferencing during conditionals, e.g. 
aws.StringValue() paths: - exclude: - - aws/cloudfront_distribution_configuration_structure.go - - aws/cloudfront_distribution_configuration_structure_test.go - - aws/config.go - - aws/data_source_aws_route* - - aws/ecs_task_definition_equivalency.go - - aws/opsworks_layers.go - - aws/resource_aws_d*.go - - aws/resource_aws_e*.go - - aws/resource_aws_g*.go - - aws/resource_aws_i*.go - - aws/resource_aws_k*.go - - aws/resource_aws_l*.go - - aws/resource_aws_main_route_table_association.go - - aws/resource_aws_n*.go - - aws/resource_aws_o*.go - - aws/resource_aws_r*.go - - aws/resource_aws_s*.go - - aws/resource*_test.go - - aws/structure.go - - aws/internal/generators/ - - aws/internal/keyvaluetags/ - - aws/internal/naming/ - - providerlint/vendor/ include: - - aws/ + - internal/service patterns: - pattern-either: - pattern: '$LHS == *$RHS' From 64f4f869b132672997d3dc45478cf8ef38e352ee Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 17 Nov 2021 20:35:21 -0500 Subject: [PATCH 259/304] Remove duplicate set, unused call --- internal/service/firehose/delivery_stream.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index d8770dd5d83c..7fd43bcf7f83 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -1679,10 +1679,6 @@ func createExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) } - if _, ok := s3["dynamic_partitioning_configuration"]; ok { - configuration.DynamicPartitioningConfiguration = extractDynamicPartitioningConfiguration(s3) - } - if v, ok := s3["error_output_prefix"]; ok && v.(string) != "" { configuration.ErrorOutputPrefix = aws.String(v.(string)) } @@ -1982,14 +1978,6 @@ func extractDynamicPartitioningConfiguration(s3 map[string]interface{}) *firehos return DynamicPartitioningConfiguration } -func extractRetryOptions(ro []interface{}) *firehose.RetryOptions { - options := ro[0].(map[string]interface{}) - - return &firehose.RetryOptions{ - DurationInSeconds: aws.Int64(int64(options["duration_in_seconds"].(int))), - } -} - func extractProcessingConfiguration(s3 map[string]interface{}) *firehose.ProcessingConfiguration { config := s3["processing_configuration"].([]interface{}) if len(config) == 0 || config[0] == nil { From ab0add9ec9568b4c48601eec89323e0cffb35ad1 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 17 Nov 2021 17:43:51 -0800 Subject: [PATCH 260/304] Revert "Semgrep: Try removing restrictions" --- .semgrep.yml | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/.semgrep.yml b/.semgrep.yml index 2cbc3a8421cc..e4f58f08fd81 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -70,8 +70,32 @@ rules: languages: [go] message: Prefer AWS Go SDK pointer conversion functions for dereferencing during conditionals, e.g. 
aws.StringValue() paths: + exclude: + - aws/cloudfront_distribution_configuration_structure.go + - aws/cloudfront_distribution_configuration_structure_test.go + - aws/config.go + - aws/data_source_aws_route* + - aws/ecs_task_definition_equivalency.go + - aws/opsworks_layers.go + - aws/resource_aws_d*.go + - aws/resource_aws_e*.go + - aws/resource_aws_g*.go + - aws/resource_aws_i*.go + - aws/resource_aws_k*.go + - aws/resource_aws_l*.go + - aws/resource_aws_main_route_table_association.go + - aws/resource_aws_n*.go + - aws/resource_aws_o*.go + - aws/resource_aws_r*.go + - aws/resource_aws_s*.go + - aws/resource*_test.go + - aws/structure.go + - aws/internal/generators/ + - aws/internal/keyvaluetags/ + - aws/internal/naming/ + - providerlint/vendor/ include: - - internal/service + - aws/ patterns: - pattern-either: - pattern: '$LHS == *$RHS' From 37f2e23591004cd82e7566956b86a4c10f42d584 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 17 Nov 2021 21:12:48 -0500 Subject: [PATCH 261/304] CR updates; align with service packages structure; flex and enum --- internal/service/s3/bucket.go | 4 +- .../s3/bucket_replication_configuration.go | 625 +++----- .../bucket_replication_configuration_test.go | 1307 +++++++++-------- internal/service/s3/bucket_test.go | 64 + internal/service/s3/enum.go | 2 + internal/service/s3/flex.go | 686 +++++++++ 6 files changed, 1648 insertions(+), 1040 deletions(-) create mode 100644 internal/service/s3/flex.go diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 5bc4db22b414..9ff7a34dc70e 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -818,7 +818,7 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("replication_configuration") { - if err := resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn, d); err != nil { + if err := resourceBucketInternalReplicationConfigurationUpdate(conn, d); err != nil { return err } } @@ -2033,7 +2033,7 @@ func resourceBucketObjectLockConfigurationUpdate(conn *s3.S3, d *schema.Resource return nil } -func resourceAwsS3BucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { +func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { bucket := d.Get("bucket").(string) replicationConfiguration := d.Get("replication_configuration").([]interface{}) diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index d41c3dc37c58..ec27a3fdc4d0 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -1,11 +1,8 @@ package s3 import ( - "errors" "fmt" "log" - "net/http" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -21,10 +18,10 @@ import ( func ResourceBucketReplicationConfiguration() *schema.Resource { return &schema.Resource{ - Create: resourceAwsS3BucketReplicationConfigurationPut, - Read: resourceAwsS3BucketReplicationConfigurationRead, - Update: resourceAwsS3BucketReplicationConfigurationUpdate, - Delete: resourceAwsS3BucketReplicationConfigurationDelete, + Create: resourceBucketReplicationConfigurationCreate, + Read: resourceBucketReplicationConfigurationRead, + Update: resourceBucketReplicationConfigurationUpdate, + Delete: resourceBucketReplicationConfigurationDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, @@ -32,34 +29,57 @@ func 
ResourceBucketReplicationConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { Type: schema.TypeString, - Optional: true, - Computed: true, + Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(0, 63), + ValidateFunc: validation.StringLenBetween(1, 63), }, "role": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, }, - "rules": { + "rule": { Type: schema.TypeSet, Required: true, - Set: rulesHash, + //Set: rulesHash, + MaxItems: 1000, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 255), + "delete_marker_replication": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), + }, + }, + }, }, "destination": { Type: schema.TypeList, MaxItems: 1, - MinItems: 1, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "account_id": { + "access_control_translation": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + }, + }, + }, + }, + "account": { Type: schema.TypeString, Optional: true, ValidateFunc: verify.ValidAccountID, @@ -69,26 +89,16 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, - "storage_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), - }, - "replica_kms_key_id": { - Type: schema.TypeString, - Optional: true, - }, - "access_control_translation": { + "encryption_configuration": { Type: schema.TypeList, Optional: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "owner": { + "replica_kms_key_id": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + ValidateFunc: verify.ValidARN, }, }, }, @@ -96,38 +106,38 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "metrics": { Type: schema.TypeList, Optional: true, - MinItems: 1, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), - }, "event_threshold": { Type: schema.TypeList, Required: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + // Currently, the S3 API only supports 15 minutes; + // however, to account for future changes, validation + // is left at positive integers. 
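+ // (Illustration: given that constraint, the only block the service
+ // accepts today is effectively `event_threshold { minutes = 15 }`.)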
ValidateFunc: validation.IntAtLeast(0), }, }, }, }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), + }, }, }, }, "replication_time": { Type: schema.TypeList, Optional: true, - MinItems: 1, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { @@ -138,13 +148,15 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "time": { Type: schema.TypeList, Required: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + // Currently, the S3 API only supports 15 minutes; + // however, to account for future changes, validation + // is left at positive integers. ValidateFunc: validation.IntAtLeast(0), }, }, @@ -153,42 +165,67 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + }, }, }, }, - "source_selection_criteria": { + "existing_object_replication": { Type: schema.TypeList, Optional: true, - MinItems: 1, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "sse_kms_encrypted_objects": { + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), + }, + }, + }, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "and": { Type: schema.TypeList, Optional: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "status": { + "prefix": { Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, + "tags": tftags.TagsSchema(), }, }, }, - "replica_modifications": { + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "tag": { Type: schema.TypeList, - Optional: true, - MinItems: 1, MaxItems: 1, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, }, }, }, @@ -196,65 +233,61 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { }, }, }, - "prefix": { + "id": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), + ValidateFunc: validation.StringLenBetween(0, 255), }, - "status": { + "prefix": { Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, "priority": { Type: schema.TypeInt, Optional: true, }, - "filter": { + "source_selection_criteria": { Type: schema.TypeList, Optional: true, - MinItems: 1, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), + "replica_modifications": 
{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicaModificationsStatus_Values(), false), + }, + }, + }, }, - "tags": tftags.TagsSchema(), - }, - }, - }, - "existing_object_replication": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ExistingObjectReplicationStatus_Values(), false), + "sse_kms_encrypted_objects": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.SseKmsEncryptedObjectsStatus_Values(), false), + }, + }, + }, }, }, }, }, - "delete_marker_replication": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.DeleteMarkerReplicationStatus_Values(), false), - }, - }, - }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), }, }, }, @@ -263,354 +296,100 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { } } -func resourceAwsS3BucketReplicationConfigurationPut(d *schema.ResourceData, meta interface{}) error { - // Get the bucket - var bucket string - if v, ok := d.GetOk("bucket"); ok { - bucket = v.(string) - } else { - log.Printf("[ERROR] S3 Bucket name not set") - return errors.New("[ERROR] S3 Bucket name not set") - } - d.SetId(bucket) - - return resourceAwsS3BucketReplicationConfigurationUpdate(d, meta) -} +func resourceBucketReplicationConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn -func resourceAwsS3BucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + bucket := d.Get("bucket").(string) - if _, ok := d.GetOk("bucket"); !ok { - // during import operations, use the supplied ID for the bucket name - d.Set("bucket", d.Id()) + rc := &s3.ReplicationConfiguration{ + Role: aws.String(d.Get("role").(string)), + Rules: ExpandRules(d.Get("rule").(*schema.Set).List()), } - var bucket *string - input := &s3.HeadBucketInput{} - if rsp, ok := d.GetOk("bucket"); !ok { - log.Printf("[ERROR] S3 Bucket name not set") - return errors.New("[ERROR] S3 Bucket name not set") - } else { - bucket = aws.String(rsp.(string)) - input.Bucket = bucket + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, } - conn := meta.(*conns.AWSClient).S3Conn - - err := resource.Retry(bucketCreatedTimeout, func() *resource.RetryError { - _, err := conn.HeadBucket(input) - - if d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { + err := resource.Retry(propagationTimeout, func() *resource.RetryError { + _, err := conn.PutBucketReplication(input) + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { return resource.RetryableError(err) } - - if d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - 
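+ // Replication requires versioning to already be Enabled on the bucket,
+ // and versioning state can lag briefly after being turned on, so both
+ // errors are treated as retryable here.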
return resource.RetryableError(err) - } - if err != nil { return resource.NonRetryableError(err) } - return nil }) if tfresource.TimedOut(err) { - _, err = conn.HeadBucket(input) + _, err = conn.PutBucketReplication(input) } - if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, http.StatusNotFound) { - log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) - return nil + if err != nil { + return fmt.Errorf("error creating S3 replication configuration for bucket (%s): %w", bucket, err) } - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) - return nil - } + d.SetId(bucket) - if err != nil { - return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) + return resourceBucketReplicationConfigurationRead(d, meta) +} + +func resourceBucketReplicationConfigurationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn + + input := &s3.GetBucketReplicationInput{ + Bucket: aws.String(d.Id()), } // Read the bucket replication configuration - replicationResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: bucket, - }) + output, err := retryWhenBucketNotFound(func() (interface{}, error) { + return conn.GetBucketReplication(input) }) - if err != nil && !tfawserr.ErrMessageContains(err, "ReplicationConfigurationNotFoundError", "") { - return fmt.Errorf("error getting S3 Bucket replication: %s", err) - } - replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput) - if !ok || replication == nil { - return fmt.Errorf("error reading replication_configuration") - } - r := replication.ReplicationConfiguration - // set role - if r.Role != nil && aws.StringValue(r.Role) != "" { - d.Set("role", r.Role) - } - rules := make([]interface{}, 0, len(r.Rules)) - for _, v := range r.Rules { - t := make(map[string]interface{}) - if v.Destination != nil { - rd := make(map[string]interface{}) - if v.Destination.Bucket != nil { - rd["bucket"] = aws.StringValue(v.Destination.Bucket) - } - if v.Destination.StorageClass != nil { - rd["storage_class"] = aws.StringValue(v.Destination.StorageClass) - } - if v.Destination.EncryptionConfiguration != nil { - if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil { - rd["replica_kms_key_id"] = aws.StringValue(v.Destination.EncryptionConfiguration.ReplicaKmsKeyID) - } - } - if v.Destination.Account != nil { - rd["account_id"] = aws.StringValue(v.Destination.Account) - } - if v.Destination.AccessControlTranslation != nil { - rdt := map[string]interface{}{ - "owner": aws.StringValue(v.Destination.AccessControlTranslation.Owner), - } - rd["access_control_translation"] = []interface{}{rdt} - } - if v.Destination.ReplicationTime != nil { - drt := make(map[string]interface{}) - if v.Destination.ReplicationTime.Status != nil { - drt["status"] = aws.StringValue(v.Destination.ReplicationTime.Status) - drtm := make(map[string]interface{}) - drtm["minutes"] = aws.Int64Value(v.Destination.ReplicationTime.Time.Minutes) - drt["time"] = []interface{}{drtm} - rd["replication_time"] = []interface{}{drt} - } - } - if v.Destination.Metrics != nil { - dm := make(map[string]interface{}) - if v.Destination.Metrics.Status != nil { - dm["status"] = aws.StringValue(v.Destination.Metrics.Status) - dmetm := make(map[string]interface{}) - dmetm["minutes"] = 
aws.Int64Value(v.Destination.Metrics.EventThreshold.Minutes) - dm["event_threshold"] = []interface{}{dmetm} - rd["metrics"] = []interface{}{dm} - } - } - t["destination"] = []interface{}{rd} - } + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket Replication Configuration (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } - if v.ExistingObjectReplication != nil { - status := make(map[string]interface{}) - status["status"] = aws.StringValue(v.ExistingObjectReplication.Status) - t["existing_object_replication"] = []interface{}{status} - } + if err != nil { + return fmt.Errorf("error getting S3 Bucket Replication Configuration for bucket (%s): %w", d.Id(), err) + } - if v.ID != nil { - t["id"] = aws.StringValue(v.ID) - } - if v.Prefix != nil { - t["prefix"] = aws.StringValue(v.Prefix) - } - if v.Status != nil { - t["status"] = aws.StringValue(v.Status) - } - if vssc := v.SourceSelectionCriteria; vssc != nil { - tssc := make(map[string]interface{}) - if vssc.SseKmsEncryptedObjects != nil { - tSseKms := make(map[string]interface{}) - tSseKms["status"] = aws.StringValue(vssc.SseKmsEncryptedObjects.Status) - tssc["sse_kms_encrypted_objects"] = []interface{}{tSseKms} - } - t["source_selection_criteria"] = []interface{}{tssc} - } + replication, ok := output.(*s3.GetBucketReplicationOutput) - if v.Priority != nil { - t["priority"] = int(aws.Int64Value(v.Priority)) - } + if !ok || replication == nil || replication.ReplicationConfiguration == nil { + return fmt.Errorf("error reading S3 Bucket Replication Configuration for bucket (%s): empty output", d.Id()) + } - if f := v.Filter; f != nil { - m := map[string]interface{}{} - if f.Prefix != nil { - m["prefix"] = aws.StringValue(f.Prefix) - } - if t := f.Tag; t != nil { - m["tags"] = KeyValueTags([]*s3.Tag{t}).IgnoreAWS().Map() - } - if a := f.And; a != nil { - m["prefix"] = aws.StringValue(a.Prefix) - m["tags"] = KeyValueTags(a.Tags).IgnoreAWS().Map() - } - t["filter"] = []interface{}{m} - - if v.DeleteMarkerReplication != nil && v.DeleteMarkerReplication.Status != nil { - status := make(map[string]interface{}) - status["status"] = aws.StringValue(v.DeleteMarkerReplication.Status) - t["delete_marker_replication"] = []interface{}{status} - } - } + r := replication.ReplicationConfiguration - rules = append(rules, t) + d.Set("bucket", d.Id()) + d.Set("role", r.Role) + if err := d.Set("rule", schema.NewSet(rulesHash, FlattenRules(r.Rules))); err != nil { + return fmt.Errorf("error setting rule: %w", err) } - d.Set("rules", schema.NewSet(rulesHash, rules)) return nil } -func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*conns.AWSClient).S3Conn - bucket := d.Get("bucket").(string) - - rc := &s3.ReplicationConfiguration{} - if val, ok := d.GetOk("role"); ok { - rc.Role = aws.String(val.(string)) - } - - rcRules := d.Get("rules").(*schema.Set).List() - rules := []*s3.ReplicationRule{} - for _, v := range rcRules { - rr := v.(map[string]interface{}) - rcRule := &s3.ReplicationRule{} - if status, ok := rr["status"]; ok && status != "" { - rcRule.Status = aws.String(status.(string)) - } else { - continue - } - - if rrid, ok := rr["id"]; ok && rrid != "" { - rcRule.ID = aws.String(rrid.(string)) - } - - eor := rr["existing_object_replication"].([]interface{}) - if len(eor) > 0 { - s := eor[0].(map[string]interface{}) - rcRule.ExistingObjectReplication = 
&s3.ExistingObjectReplication{ - Status: aws.String(s["status"].(string)), - } - } - - ruleDestination := &s3.Destination{} - if dest, ok := rr["destination"].([]interface{}); ok && len(dest) > 0 { - if dest[0] != nil { - bd := dest[0].(map[string]interface{}) - ruleDestination.Bucket = aws.String(bd["bucket"].(string)) - - if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { - ruleDestination.StorageClass = aws.String(storageClass.(string)) - } - - if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" { - ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ - ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)), - } - } - - if account, ok := bd["account_id"]; ok && account != "" { - ruleDestination.Account = aws.String(account.(string)) - } - - if aclTranslation, ok := bd["access_control_translation"].([]interface{}); ok && len(aclTranslation) > 0 { - aclTranslationValues := aclTranslation[0].(map[string]interface{}) - ruleAclTranslation := &s3.AccessControlTranslation{} - ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) - ruleDestination.AccessControlTranslation = ruleAclTranslation - } - - rt, ok := bd["replication_time"].([]interface{}) - if ok && len(rt) > 0 { - s := rt[0].(map[string]interface{}) - if t, ok := s["time"].([]interface{}); ok && len(t) > 0 { - m := t[0].(map[string]interface{}) - ruleDestination.ReplicationTime = &s3.ReplicationTime{ - Status: aws.String(s["status"].(string)), - Time: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(int64(m["minutes"].(int))), - }, - } - } - } - - rm, ok := bd["metrics"].([]interface{}) - if ok && len(rm) > 0 { - s := rm[0].(map[string]interface{}) - if et, ok := s["event_threshold"].([]interface{}); ok && len(et) > 0 { - m := et[0].(map[string]interface{}) - ruleDestination.Metrics = &s3.Metrics{ - Status: aws.String(s["status"].(string)), - EventThreshold: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(int64(m["minutes"].(int))), - }, - } - } - } - - } - } - rcRule.Destination = ruleDestination - - if ssc, ok := rr["source_selection_criteria"].([]interface{}); ok && len(ssc) > 0 { - if ssc[0] != nil { - sscValues := ssc[0].(map[string]interface{}) - ruleSsc := &s3.SourceSelectionCriteria{} - if sseKms, ok := sscValues["sse_kms_encrypted_objects"].([]interface{}); ok && len(sseKms) > 0 { - if sseKms[0] != nil { - sseKmsValues := sseKms[0].(map[string]interface{}) - sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} - sseKmsEncryptedObjects.Status = aws.String(sseKmsValues["status"].(string)) - ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects - } - } - if sscRm, ok := sscValues["replica_modifications"].([]interface{}); ok && len(sscRm) > 0 { - if sscRm[0] != nil { - replicaModValues := sscRm[0].(map[string]interface{}) - replicaModifications := &s3.ReplicaModifications{} - replicaModifications.Status = aws.String(replicaModValues["status"].(string)) - ruleSsc.ReplicaModifications = replicaModifications - } - } - rcRule.SourceSelectionCriteria = ruleSsc - } - } - - if f, ok := rr["filter"].([]interface{}); ok && len(f) > 0 && f[0] != nil { - // XML schema V2. 
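+ // Schema V2 rules carry Filter, Priority, and DeleteMarkerReplication, whereas
+ // schema V1 rules use the rule-level Prefix instead; the S3 API rejects rules
+ // that set both Filter and Prefix, which is why this expansion branches on the
+ // presence of a filter block.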
- rcRule.Priority = aws.Int64(int64(rr["priority"].(int))) - rcRule.Filter = &s3.ReplicationRuleFilter{} - filter := f[0].(map[string]interface{}) - tags := Tags(tftags.New(filter["tags"]).IgnoreAWS()) - if len(tags) > 0 { - rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(filter["prefix"].(string)), - Tags: tags, - } - } else { - rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) - } - - dmr, ok := rr["delete_marker_replication"].([]interface{}) - if ok && len(dmr) > 0 { - s := dmr[0].(map[string]interface{}) - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s["status"].(string)), - } - } - } else { - // XML schema V1. - rcRule.Prefix = aws.String(rr["prefix"].(string)) - } +func resourceBucketReplicationConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn - rules = append(rules, rcRule) + rc := &s3.ReplicationConfiguration{ + Role: aws.String(d.Get("role").(string)), + Rules: ExpandRules(d.Get("rule").(*schema.Set).List()), } - rc.Rules = rules - i := &s3.PutBucketReplicationInput{ - Bucket: aws.String(bucket), + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(d.Id()), ReplicationConfiguration: rc, } - log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := s3conn.PutBucketReplication(i) - if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + err := resource.Retry(propagationTimeout, func() *resource.RetryError { + _, err := conn.PutBucketReplication(input) + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { return resource.RetryableError(err) } if err != nil { @@ -618,29 +397,33 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(d *schema.ResourceData, m } return nil }) + if tfresource.TimedOut(err) { - _, err = s3conn.PutBucketReplication(i) + _, err = conn.PutBucketReplication(input) } + if err != nil { - return fmt.Errorf("Error putting S3 replication configuration: %s", err) + return fmt.Errorf("error updating S3 replication configuration for bucket (%s): %w", d.Id(), err) } - return resourceAwsS3BucketReplicationConfigurationRead(d, meta) + return resourceBucketReplicationConfigurationRead(d, meta) } -func resourceAwsS3BucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*conns.AWSClient).S3Conn - bucket := d.Get("bucket").(string) +func resourceBucketReplicationConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).S3Conn - log.Printf("[DEBUG] S3 Delete Bucket Replication: %s", d.Id()) + input := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketReplication(input) - dbri := &s3.DeleteBucketReplicationInput{ - Bucket: aws.String(bucket), + if tfawserr.ErrCodeEquals(err, ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + return nil } - _, err := s3conn.DeleteBucketReplication(dbri) if err != nil { - return fmt.Errorf("Error removing S3 bucket replication: %s", err) + return fmt.Errorf("error deleting S3 bucket replication configuration for bucket (%s): %w", d.Id(), err) } return nil diff --git a/internal/service/s3/bucket_replication_configuration_test.go 
b/internal/service/s3/bucket_replication_configuration_test.go index 39dbc1e16976..13c8819b5189 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -2,11 +2,7 @@ package s3_test import ( "fmt" - "reflect" - "sort" - "strings" "testing" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -17,16 +13,17 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_basic(t *testing.T) { + iamRoleResourceName := "aws_iam_role.test" + dstBucketResourceName := "aws_s3_bucket.destination" + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ @@ -39,83 +36,91 @@ func TestAccAWSS3BucketReplicationConfig_basic(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Config: testAccBucketReplicationConfigurationBasic(rName, s3.StorageClassStandard), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.StorageClassStandard), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfig(rInt, "GLACIER"), + Config: testAccBucketReplicationConfigurationBasic(rName, s3.StorageClassGlacier), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: 
aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.StorageClassGlacier), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassGlacier, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjects(rInt), + Config: testAccBucketReplicationConfigurationWithSseKmsEncryptedObjects(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - EncryptionConfiguration: &s3.EncryptionConfiguration{ - ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - SourceSelectionCriteria: &s3.SourceSelectionCriteria{ - SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ - Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), - }, - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.encryption_configuration.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + "source_selection_criteria.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": s3.SseKmsEncryptedObjectsStatusEnabled, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.encryption_configuration.0.replica_kms_key_id", kmsKeyResourceName, "arn"), + ), + }, + }, + }) +} + +func TestAccS3BucketReplicationConfiguration_disappears(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfigurationBasic(rName, s3.StorageClassStandard), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3.ResourceBucketReplicationConfiguration(), resourceName), 
), + ExpectNonEmptyPlan: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *testing.T) { - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_multipleDestinationsEmptyFilter(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -130,52 +135,51 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsEmptyFilter(t *test CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), + Config: testAccBucketReplicationConfigurationWithMultipleDestinationsEmptyFilter(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "3"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": "STANDARD", + "destination.0.storage_class": s3.StorageClassStandard, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": "STANDARD_IA", + "destination.0.storage_class": s3.StorageClassStandardIa, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule3", "priority": "3", - "status": "Disabled", + "status": s3.ReplicationRuleStatusDisabled, "filter.#": "1", "filter.0.prefix": "", "destination.#": "1", - "destination.0.storage_class": "ONEZONE_IA", + "destination.0.storage_class": s3.StorageClassOnezoneIa, }), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsEmptyFilter(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *testing.T) { - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_multipleDestinationsNonEmptyFilter(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -190,44 +194,43 @@ func TestAccAWSS3BucketReplicationConfig_multipleDestinationsNonEmptyFilter(t *t 
CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), + Config: testAccBucketReplicationConfigurationWithMultipleDestinationsNonEmptyFilter(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": "STANDARD", + "destination.0.storage_class": s3.StorageClassStandard, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix2", "destination.#": "1", - "destination.0.storage_class": "STANDARD_IA", + "destination.0.storage_class": s3.StorageClassStandardIa, }), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsNonEmptyFilter(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { +func TestAccS3BucketReplicationConfiguration_twoDestination(t *testing.T) { // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -242,45 +245,46 @@ func TestAccAWSS3BucketReplicationConfig_twoDestination(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), + Config: testAccBucketReplicationConfigurationWithMultipleDestinationsTwoDestination(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "rules.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", map[string]string{ + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": "STANDARD", + "destination.0.storage_class": s3.StorageClassStandard, }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rules.*", 
map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule2", "priority": "2", - "status": "Enabled", + "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", "filter.0.prefix": "prefix1", "destination.#": "1", - "destination.0.storage_class": "STANDARD_IA", + "destination.0.storage_class": s3.StorageClassStandardIa, }), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithMultipleDestinationsTwoDestination(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessControlTranslation(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAccessControlTranslation(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + callerIdentityDataSourceName := "data.aws_caller_identity.current" + iamRoleResourceName := "aws_iam_role.test" + dstBucketResourceName := "aws_s3_bucket.destination" + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -295,80 +299,69 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAccessContr CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Config: testAccBucketReplicationConfigurationWithAccessControlTranslation(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - AccessControlTranslation: &s3.AccessControlTranslation{ - Owner: aws.String("Destination"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.access_control_translation.#": "1", + "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: 
testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { - Config: testAccAWSS3BucketReplicationConfigWithSseKmsEncryptedObjectsAndAccessControlTranslation(rInt), + Config: testAccBucketReplicationConfigurationWithSseKmsEncryptedObjectsAndAccessControlTranslation(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - EncryptionConfiguration: &s3.EncryptionConfiguration{ - ReplicaKmsKeyID: aws.String("${aws_kms_key.replica.arn}"), - }, - AccessControlTranslation: &s3.AccessControlTranslation{ - Owner: aws.String("Destination"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - SourceSelectionCriteria: &s3.SourceSelectionCriteria{ - SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{ - Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled), - }, - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.access_control_translation.#": "1", + "destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.encryption_configuration.#": "1", + "source_selection_criteria.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.#": "1", + "source_selection_criteria.0.sse_kms_encrypted_objects.0.status": s3.SseKmsEncryptedObjectsStatusEnabled, + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.encryption_configuration.0.replica_kms_key_id", kmsKeyResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12480 -func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_configurationRuleDestinationAddAccessControlTranslation(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + callerIdentityDataSourceName := "data.aws_caller_identity.current" + dstBucketResourceName := "aws_s3_bucket.destination" 
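+ // access_control_translation with owner "Destination" reassigns replica
+ // ownership to the destination bucket owner; S3 requires the destination
+ // account to be set alongside it, hence the caller-identity attribute
+ // pairing checked below.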
+ iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -383,68 +376,60 @@ func TestAccAWSS3BucketReplicationConfig_configurationRuleDestinationAddAccessCo CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigConfigurationRulesDestination(rInt), + Config: testAccBucketReplicationConfigurationRulesDestination(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl", "versioning"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { - Config: testAccAWSS3BucketReplicationConfigWithAccessControlTranslation(rInt), + Config: testAccBucketReplicationConfigurationWithAccessControlTranslation(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Account: aws.String("${data.aws_caller_identity.current.account_id}"), - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - AccessControlTranslation: &s3.AccessControlTranslation{ - Owner: aws.String("Destination"), - }, - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.access_control_translation.#": "1", + 
"destination.0.access_control_translation.0.owner": s3.OwnerOverrideDestination, + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.account", callerIdentityDataSourceName, "account_id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_replicationTimeControl(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -459,52 +444,45 @@ func TestAccAWSS3BucketReplicationConfig_replicationTimeControl(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigRTC(rInt), + Config: testAccBucketReplicationConfigurationRTC(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Priority: aws.Int64(0), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - ReplicationTime: &s3.ReplicationTime{ - Status: aws.String(s3.ReplicationTimeStatusEnabled), - Time: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(15), - }, - }, - Metrics: &s3.Metrics{ - Status: aws.String(s3.MetricsStatusEnabled), - EventThreshold: &s3.ReplicationTimeValue{ - Minutes: aws.Int64(15), - }, - }, - }, - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "filter.#": "1", + "filter.0.prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "destination.#": "1", + "destination.0.replication_time.#": "1", + "destination.0.replication_time.0.status": s3.ReplicationTimeStatusEnabled, + "destination.0.replication_time.0.time.#": "1", + "destination.0.replication_time.0.time.0.minutes": "15", + "destination.0.metrics.#": "1", + //"destination.0.metrics.0.status": s3.MetricsStatusEnabled, + //"destination.0.metrics.0.event_threshold.#": "1", + //"destination.0.metrics.0.event_threshold.0.minutes": "15", + }), + 
resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_replicaModifications(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -519,44 +497,41 @@ func TestAccAWSS3BucketReplicationConfig_replicaModifications(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigReplicaMods(rInt), + Config: testAccBucketReplicationConfigurationReplicaMods(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Priority: aws.Int64(0), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - }, - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - SourceSelectionCriteria: &s3.SourceSelectionCriteria{ - ReplicaModifications: &s3.ReplicaModifications{ - Status: aws.String(s3.ReplicaModificationsStatusEnabled), - }, - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "filter.#": "1", + "filter.0.prefix": "foo", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "source_selection_criteria.#": "1", + "source_selection_criteria.0.replica_modifications.#": "1", + "source_selection_criteria.0.replica_modifications.0.status": s3.ReplicaModificationsStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 -func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_withoutStorageClass(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := 
"aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -571,25 +546,34 @@ func TestAccAWSS3BucketReplicationConfig_withoutStorageClass(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - Check: resource.ComposeTestCheckFunc(), + Config: testAccBucketReplicationConfigurationWithoutStorageClass(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "prefix": "foo", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), }, { - Config: testAccAWSS3BucketReplicationConfigWithoutStorageClass(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" +func TestAccS3BucketReplicationConfiguration_schemaV2(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -604,48 +588,39 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), + Config: testAccBucketReplicationConfigurationWithV2ConfigurationNoTags(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("foo"), - }, - Priority: aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", 
map[string]string{ + "id": "foobar", + "filter.#": "1", + "filter.0.prefix": "foo", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfigWithV2ConfigurationNoTags(rInt), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { - resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := sdkacctest.RandInt() +func TestAccS3BucketReplicationConfiguration_schemaV2SameRegion(t *testing.T) { rName := sdkacctest.RandomWithPrefix("tf-acc-test") rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_s3_bucket_replication_configuration.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -657,57 +632,41 @@ func TestAccAWSS3BucketReplicationConfig_schemaV2SameRegion(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), + Config: testAccBucketReplicationConfiguration_schemaV2SameRegion(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( - acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("testid"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("testprefix"), - }, - Priority: aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - }, - }, - ), + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "testid", + "filter.#": "1", + "filter.0.prefix": "testprefix", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfig_schemaV2SameRegion(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, 
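+ // ImportStateVerify asserts that every attribute read during import matches
+ // what the prior apply wrote to state, so no ImportStateVerifyIgnore entries
+ // are needed once the resource round-trips cleanly.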
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "force_destroy", "acl"}, }, }, }) } -const isExistingObjectReplicationBlocked = true +func TestAccS3BucketReplicationConfiguration_existingObjectReplication(t *testing.T) { + t.Skipf("skipping test: AWS Technical Support request required to allow ExistingObjectReplication") -func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) { - if isExistingObjectReplicationBlocked { - /* https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication - A request to AWS Technical Support needs to be made in order to allow ExistingObjectReplication. - Once that request is approved, this can be unblocked for testing. */ - return - } - resourceName := "aws_s3_bucket_replication_configuration.replication" - rInt := sdkacctest.RandInt() - rName := sdkacctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rNameDestination := sdkacctest.RandomWithPrefix("tf-acc-test") + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider @@ -719,94 +678,114 @@ func TestAccAWSS3BucketReplicationConfig_existingObjectReplication(t *testing.T) CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), + Config: testAccBucketReplicationConfiguration_existingObjectReplication(rName, rNameDestination), Check: resource.ComposeTestCheckFunc( - acctest.CheckResourceAttrGlobalARN(resourceName, "role", "iam", fmt.Sprintf("role/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("testid"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::%s", acctest.Partition(), rNameDestination)), - StorageClass: aws.String(s3.ObjectStorageClassStandard), - }, - Status: aws.String(s3.ReplicationRuleStatusEnabled), - Filter: &s3.ReplicationRuleFilter{ - Prefix: aws.String("testprefix"), - }, - Priority: aws.Int64(0), - DeleteMarkerReplication: &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusEnabled), - }, - ExistingObjectReplication: &s3.ExistingObjectReplication{ - Status: aws.String(s3.ExistingObjectReplicationStatusEnabled), - }, - }, - }, - ), + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "testid", + "filter.#": "1", + "filter.0.prefix": "testprefix", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusEnabled, + "existing_object_replication.#": "1", + "existing_object_replication.0.status": s3.ExistingObjectReplicationStatusEnabled, + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassStandard, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", 
dstBucketResourceName, "arn"), ), }, { - Config: testAccAWSS3BucketReplicationConfig_existingObjectReplication(rName, rNameDestination, rInt), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "force_destroy", "acl"}, }, }, }) } -func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { - rInt := sdkacctest.RandInt() - partition := acctest.Partition() - iamRoleResourceName := "aws_iam_role.role" - resourceName := "aws_s3_bucket_replication_configuration.replication" - - testDeleted := func(r string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[r] - if ok { - return fmt.Errorf("Replication resource configuration %q should have been deleted.", r) - } - return nil - } - } +func TestAccS3BucketReplicationConfiguration_filter_tagFilter(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" // record the initialized providers so that we can use them to check for the instances in each region var providers []*schema.Provider resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(t) - acctest.PreCheckMultipleRegion(t, 2) + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfiguration_filter_tag(rName, "testkey", "testvalue"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.tag.#": "1", + "filter.0.tag.0.key": "testkey", + "filter.0.tag.0.value": "testvalue", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, + }) +} + +func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), ProviderFactories: acctest.FactoriesAlternate(&providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckReplicationConfigDestroy, &providers), Steps: []resource.TestStep{ { - Config: testAccAWSS3BucketReplicationConfig(rInt, "STANDARD"), + Config: 
testAccBucketReplicationConfiguration_filter_andOperator_prefixAndTags(rName, "testkey1", "testvalue1", "testkey2", "testvalue2"), Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "rules.#", "1"), - testAccCheckBucketReplicationRules( - resourceName, - []*s3.ReplicationRule{ - { - ID: aws.String("foobar"), - Destination: &s3.Destination{ - Bucket: aws.String(fmt.Sprintf("arn:%s:s3:::tf-test-bucket-destination-%d", partition, rInt)), - StorageClass: aws.String(s3.StorageClassStandard), - }, - Prefix: aws.String("foo"), - Status: aws.String(s3.ReplicationRuleStatusEnabled), - }, - }, - ), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.and.#": "1", + "filter.0.and.0.prefix": "foo", + "filter.0.and.0.tags.%": "2", + "filter.0.and.0.tags.testkey1": "testvalue1", + "filter.0.and.0.tags.testkey2": "testvalue2", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), ), }, { @@ -815,113 +794,99 @@ func TestAccAWSS3BucketReplicationConfig_delete(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSS3BucketReplicationConfigBasic(rInt), - Check: resource.ComposeTestCheckFunc(testDeleted(resourceName)), + Config: testAccBucketReplicationConfiguration_filter_andOperator_tags(rName, "testkey1", "testvalue1", "testkey2", "testvalue2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.and.#": "1", + "filter.0.and.0.tags.%": "2", + "filter.0.and.0.tags.testkey1": "testvalue1", + "filter.0.and.0.tags.testkey2": "testvalue2", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs := s.RootModule().Resources[n] - for _, rule := range rules { - if dest := rule.Destination; dest != nil { - if account := dest.Account; account != nil && strings.HasPrefix(aws.StringValue(dest.Account), "${") { - resourceReference := strings.Replace(aws.StringValue(dest.Account), "${", "", 1) - resourceReference = strings.Replace(resourceReference, "}", "", 1) - resourceReferenceParts := strings.Split(resourceReference, ".") - resourceAttribute := resourceReferenceParts[len(resourceReferenceParts)-1] - resourceName := strings.Join(resourceReferenceParts[:len(resourceReferenceParts)-1], ".") - value 
:= s.RootModule().Resources[resourceName].Primary.Attributes[resourceAttribute] - dest.Account = aws.String(value) - } - if ec := dest.EncryptionConfiguration; ec != nil { - if ec.ReplicaKmsKeyID != nil { - key_arn := s.RootModule().Resources["aws_kms_key.replica"].Primary.Attributes["arn"] - ec.ReplicaKmsKeyID = aws.String(strings.Replace(*ec.ReplicaKmsKeyID, "${aws_kms_key.replica.arn}", key_arn, -1)) - } - } - } - // Sort filter tags by key. - if filter := rule.Filter; filter != nil { - if and := filter.And; and != nil { - if tags := and.Tags; tags != nil { - sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) - } - } - } +func testAccCheckReplicationConfigDestroy(s *terraform.State, provider *schema.Provider) error { + conn := provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket_replication_configuration" { + continue } + input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn - out, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(rs.Primary.ID), + output, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketReplication(input) }) - if err != nil && tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") { - return fmt.Errorf("S3 bucket not found") - } else if err != nil && rules == nil { - return nil - } else if err != nil { - return fmt.Errorf("GetReplicationConfiguration error: %v", err) - } - for _, rule := range out.ReplicationConfiguration.Rules { - // Sort filter tags by key. - if filter := rule.Filter; filter != nil { - if and := filter.And; and != nil { - if tags := and.Tags; tags != nil { - sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key }) - } - } - } + if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeReplicationConfigurationNotFound, s3.ErrCodeNoSuchBucket) { + continue } - if !reflect.DeepEqual(out.ReplicationConfiguration.Rules, rules) { - return fmt.Errorf("bad replication rules, expected: %v, got %v", rules, out.ReplicationConfiguration.Rules) + + if err != nil { + return err } - return nil + if replication, ok := output.(*s3.GetBucketReplicationOutput); ok && replication != nil && replication.ReplicationConfiguration != nil { + return fmt.Errorf("S3 Replication Configuration for bucket (%s) still exists", rs.Primary.ID) + } } -} -func testAccCheckReplicationConfigDestroy(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).S3Conn + return nil +} - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_bucket_replication_configuration" { - continue +func testAccCheckBucketReplicationConfigurationExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) } - input := &s3.GetBucketReplicationInput{Bucket: aws.String(rs.Primary.ID)} - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.GetBucketReplication(input) - if tfawserr.ErrMessageContains(err, s3.ErrCodeNoSuchBucket, "") || tfawserr.ErrMessageContains(err, "NotFound", "") { - return nil - } + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } - if err != nil { - return resource.NonRetryableError(err) - } + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn - return resource.RetryableError(fmt.Errorf("AWS S3 
Bucket Replication Configuration still exists: %s", rs.Primary.ID)) + output, err := conn.GetBucketReplication(&s3.GetBucketReplicationInput{ + Bucket: aws.String(rs.Primary.ID), }) - if tfresource.TimedOut(err) { - _, err = conn.GetBucketReplication(input) - } - if err != nil { return err } + + if output == nil || output.ReplicationConfiguration == nil { + return fmt.Errorf("S3 Bucket Replication Configuration for bucket (%s) not found", rs.Primary.ID) + } + + return nil } - return nil } -func testAccAWSS3BucketReplicationConfigBasic(randInt int) string { +func testAccBucketReplicationConfigurationBase(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} -resource "aws_iam_role" "role" { - name = "tf-iam-role-replication-%[1]d" +resource "aws_iam_role" "test" { + name = %[1]q assume_role_policy = < 0 && v[0] != nil { + result.AccessControlTranslation = ExpandAccessControlTranslation(v) + } + + if v, ok := tfMap["account"].(string); ok && v != "" { + result.Account = aws.String(v) + } + + if v, ok := tfMap["bucket"].(string); ok && v != "" { + result.Bucket = aws.String(v) + } + + if v, ok := tfMap["encryption_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.EncryptionConfiguration = ExpandEncryptionConfiguration(v) + } + + if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Metrics = ExpandMetrics(v) + } + + if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.ReplicationTime = ExpandReplicationTime(v) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + result.StorageClass = aws.String(v) + } + + return result +} + +func ExpandExistingObjectReplication(l []interface{}) *s3.ExistingObjectReplication { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ExistingObjectReplication{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandFilter(l []interface{}) *s3.ReplicationRuleFilter { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationRuleFilter{} + + if v, ok := tfMap["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.And = ExpandReplicationRuleAndOperator(v) + } + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + result.Prefix = aws.String(v) + } + + if v, ok := tfMap["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + tags := Tags(tftags.New(v[0]).IgnoreAWS()) + if len(tags) > 0 { + result.Tag = tags[0] + } + } + + return result +} + +func ExpandMetrics(l []interface{}) *s3.Metrics { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.Metrics{} + + if v, ok := tfMap["event_threshold"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.EventThreshold = ExpandReplicationTimeValue(v) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandReplicationRuleAndOperator(l []interface{}) *s3.ReplicationRuleAndOperator { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationRuleAndOperator{} + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + result.Prefix = 
aws.String(v) + } + + if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { + tags := Tags(tftags.New(v).IgnoreAWS()) + if len(tags) > 0 { + result.Tags = tags + } + } + + return result +} + +func ExpandReplicationTime(l []interface{}) *s3.ReplicationTime { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationTime{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + if v, ok := tfMap["time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Time = ExpandReplicationTimeValue(v) + } + + return result +} + +func ExpandReplicationTimeValue(l []interface{}) *s3.ReplicationTimeValue { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicationTimeValue{} + + if v, ok := tfMap["minutes"].(int); ok { + result.Minutes = aws.Int64(int64(v)) + } + + return result +} + +func ExpandReplicaModifications(l []interface{}) *s3.ReplicaModifications { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.ReplicaModifications{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandRules(l []interface{}) []*s3.ReplicationRule { + var rules []*s3.ReplicationRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + rule := &s3.ReplicationRule{} + + if v, ok := tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.DeleteMarkerReplication = ExpandDeleteMarkerReplication(v) + } + + if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.Destination = ExpandDestination(v) + } + + if v, ok := tfMap["existing_object_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.ExistingObjectReplication = ExpandExistingObjectReplication(v) + } + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.Filter = ExpandFilter(v) + } + + if v, ok := tfMap["id"].(string); ok && v != "" { + rule.ID = aws.String(v) + } + + if v, ok := tfMap["prefix"].(string); ok && v != "" { + rule.Prefix = aws.String(v) + } + + if v, ok := tfMap["priority"].(int); ok && rule.Filter != nil { + rule.Priority = aws.Int64(int64(v)) + } + + if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.SourceSelectionCriteria = ExpandSourceSelectionCriteria(v) + } + + if v, ok := tfMap["status"].(string); ok && v != "" { + rule.Status = aws.String(v) + } + + rules = append(rules, rule) + } + + return rules +} + +func ExpandSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.SourceSelectionCriteria{} + + if v, ok := tfMap["replica_modifications"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.ReplicaModifications = ExpandReplicaModifications(v) + } + + if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.SseKmsEncryptedObjects = ExpandSseKmsEncryptedObjects(v) + } + + return result +} + +func ExpandSseKmsEncryptedObjects(l []interface{}) *s3.SseKmsEncryptedObjects { + if 
len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.SseKmsEncryptedObjects{} + + if v, ok := tfMap["status"].(string); ok && v != "" { + result.Status = aws.String(v) + } + + return result +} + +func ExpandTag(l []interface{}) *s3.Tag { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + result := &s3.Tag{} + + if v, ok := tfMap["key"].(string); ok && v != "" { + result.Key = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + result.Value = aws.String(v) + } + + return result +} + +func FlattenAccessControlTranslation(act *s3.AccessControlTranslation) []interface{} { + if act == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if act.Owner != nil { + m["owner"] = aws.StringValue(act.Owner) + } + + return []interface{}{m} +} + +func FlattenEncryptionConfiguration(ec *s3.EncryptionConfiguration) []interface{} { + if ec == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if ec.ReplicaKmsKeyID != nil { + m["replica_kms_key_id"] = aws.StringValue(ec.ReplicaKmsKeyID) + } + + return []interface{}{m} +} + +func FlattenDeleteMarkerReplication(dmr *s3.DeleteMarkerReplication) []interface{} { + if dmr == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if dmr.Status != nil { + m["status"] = aws.StringValue(dmr.Status) + } + + return []interface{}{m} +} + +func FlattenDestination(dest *s3.Destination) []interface{} { + if dest == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if dest.AccessControlTranslation != nil { + m["access_control_translation"] = FlattenAccessControlTranslation(dest.AccessControlTranslation) + } + + if dest.Account != nil { + m["account"] = aws.StringValue(dest.Account) + } + + if dest.Bucket != nil { + m["bucket"] = aws.StringValue(dest.Bucket) + } + + if dest.EncryptionConfiguration != nil { + m["encryption_configuration"] = FlattenEncryptionConfiguration(dest.EncryptionConfiguration) + } + + if dest.Metrics != nil { + m["metrics"] = FlattenMetrics(dest.Metrics) + } + + if dest.ReplicationTime != nil { + m["replication_time"] = FlattenReplicationTime(dest.ReplicationTime) + } + + if dest.StorageClass != nil { + m["storage_class"] = aws.StringValue(dest.StorageClass) + } + + return []interface{}{m} +} + +func FlattenExistingObjectReplication(eor *s3.ExistingObjectReplication) []interface{} { + if eor == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if eor.Status != nil { + m["status"] = aws.StringValue(eor.Status) + } + + return []interface{}{m} +} + +func FlattenFilter(filter *s3.ReplicationRuleFilter) []interface{} { + if filter == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if filter.And != nil { + m["and"] = FlattenReplicationRuleAndOperator(filter.And) + } + + if filter.Prefix != nil { + m["prefix"] = aws.StringValue(filter.Prefix) + } + + if filter.Tag != nil { + tag := KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() + m["tag"] = []interface{}{tag} + } + + return []interface{}{m} +} + +func FlattenMetrics(metrics *s3.Metrics) []interface{} { + if metrics == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if metrics.EventThreshold != nil { + m["event_threshold"] = FlattenReplicationTimeValue(metrics.EventThreshold) + } + + if metrics.Status != nil { + m["status"] = 
aws.StringValue(metrics.Status) + } + + return []interface{}{m} +} + +func FlattenReplicationTime(rt *s3.ReplicationTime) []interface{} { + if rt == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if rt.Status != nil { + m["status"] = aws.StringValue(rt.Status) + } + + if rt.Time != nil { + m["time"] = FlattenReplicationTimeValue(rt.Time) + } + + return []interface{}{m} + +} + +func FlattenReplicationTimeValue(rtv *s3.ReplicationTimeValue) []interface{} { + if rtv == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if rtv.Minutes != nil { + m["minutes"] = int(aws.Int64Value(rtv.Minutes)) + } + + return []interface{}{m} +} + +func FlattenRules(rules []*s3.ReplicationRule) []interface{} { + if len(rules) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, rule := range rules { + if rule == nil { + continue + } + + m := make(map[string]interface{}) + + if rule.DeleteMarkerReplication != nil { + m["delete_marker_replication"] = FlattenDeleteMarkerReplication(rule.DeleteMarkerReplication) + } + + if rule.Destination != nil { + m["destination"] = FlattenDestination(rule.Destination) + } + + if rule.ExistingObjectReplication != nil { + m["existing_object_replication"] = FlattenExistingObjectReplication(rule.ExistingObjectReplication) + } + + if rule.Filter != nil { + m["filter"] = FlattenFilter(rule.Filter) + } + + if rule.ID != nil { + m["id"] = aws.StringValue(rule.ID) + } + + if rule.Prefix != nil { + m["prefix"] = aws.StringValue(rule.Prefix) + } + + if rule.Priority != nil { + m["priority"] = int(aws.Int64Value(rule.Priority)) + } + + if rule.SourceSelectionCriteria != nil { + m["source_selection_criteria"] = FlattenSourceSelectionCriteria(rule.SourceSelectionCriteria) + } + + if rule.Status != nil { + m["status"] = aws.StringValue(rule.Status) + } + + results = append(results, m) + } + + return results +} + +func FlattenReplicaModifications(rc *s3.ReplicaModifications) []interface{} { + if rc == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if rc.Status != nil { + m["status"] = aws.StringValue(rc.Status) + } + + return []interface{}{m} +} + +func FlattenReplicationRuleAndOperator(op *s3.ReplicationRuleAndOperator) []interface{} { + if op == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if op.Prefix != nil { + m["prefix"] = aws.StringValue(op.Prefix) + } + + if op.Tags != nil { + m["tags"] = KeyValueTags(op.Tags).IgnoreAWS().Map() + } + + return []interface{}{m} + +} + +func FlattenSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface{} { + if ssc == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if ssc.ReplicaModifications != nil { + m["replica_modifications"] = FlattenReplicaModifications(ssc.ReplicaModifications) + } + + if ssc.SseKmsEncryptedObjects != nil { + m["sse_kms_encrypted_objects"] = FlattenSseKmsEncryptedObjects(ssc.SseKmsEncryptedObjects) + } + + return []interface{}{m} +} + +func FlattenSseKmsEncryptedObjects(objects *s3.SseKmsEncryptedObjects) []interface{} { + if objects == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if objects.Status != nil { + m["status"] = aws.StringValue(objects.Status) + } + + return []interface{}{m} +} From 875cd9df213ccb2b7522825f62bfa9656b5af026 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 17 Nov 2021 21:21:36 -0500 Subject: [PATCH 262/304] CR updates: docs --- website/docs/r/s3_bucket.html.markdown | 4 +- 
...et_replication_configuration.html.markdown | 186 ++++++++++++------ 2 files changed, 125 insertions(+), 65 deletions(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 57bb495ebd70..0d07f54b221e 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -178,7 +178,7 @@ resource "aws_s3_bucket" "versioning_bucket" { ### Using replication configuration -~> **NOTE:** See `aws_s3_bucket_replication_configuration` to support bi-directional replication configuration and additional features. +~> **NOTE:** See the [`aws_s3_bucket_replication_configuration` resource](/docs/providers/aws/r/s3_bucket_replication_configuration.html) to support bi-directional replication configuration and additional features. ```terraform provider "aws" { @@ -438,7 +438,7 @@ The `noncurrent_version_transition` object supports the following The `replication_configuration` object supports the following: -~> **NOTE:** See the `aws_s3_bucket_replication_configuration` resource documentation to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. +~> **NOTE:** See the [`aws_s3_bucket_replication_configuration` resource documentation](/docs/providers/aws/r/s3_bucket_replication_configuration.html) to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. ``` lifecycle { diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index 8de6f01c14ee..d1820a83d474 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -14,7 +14,7 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration -``` +```terraform provider "aws" { region = "eu-west-1" } @@ -107,6 +107,7 @@ resource "aws_s3_bucket" "source" { versioning { enabled = true } + lifecycle { ignore_changes = [ replication_configuration @@ -117,7 +118,8 @@ resource "aws_s3_bucket" "source" { resource "aws_s3_bucket_replication_configuration" "replication" { role = aws_iam_role.replication.arn bucket = aws_s3_bucket.source.id - rules { + + rule { id = "foobar" prefix = "foo" status = "Enabled" @@ -132,7 +134,7 @@ resource "aws_s3_bucket_replication_configuration" "replication" { ### Bi-Directional Replication -``` +```terraform # ... other configuration ... 
resource "aws_s3_bucket" "east" { @@ -167,7 +169,8 @@ resource "aws_s3_bucket" "west" { resource "aws_s3_bucket_replication_configuration" "east_to_west" { role = aws_iam_role.east_replication.arn bucket = aws_s3_bucket.east.id - rules { + + rule { id = "foobar" prefix = "foo" status = "Enabled" @@ -182,7 +185,8 @@ resource "aws_s3_bucket_replication_configuration" "east_to_west" { resource "aws_s3_bucket_replication_configuration" "west_to_east" { role = aws_iam_role.west_replication.arn bucket = aws_s3_bucket.west.id - rules { + + rule { id = "foobar" prefix = "foo" status = "Enabled" @@ -199,7 +203,7 @@ resource "aws_s3_bucket_replication_configuration" "west_to_east" { ~> **NOTE:** To avoid conflicts always add the following lifecycle object to the `aws_s3_bucket` resource of the source bucket. -This resource implements the same features that are provided by the `replication_configuration` object of the `aws_s3_bucket` resource. To avoid conflicts or unexpected apply results a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. +This resource implements the same features that are provided by the `replication_configuration` object of the [`aws_s3_bucket` resource](/docs/providers/aws/r/s3_bucket.html). To avoid conflicts or unexpected apply results, a lifecycle configuration is needed on the `aws_s3_bucket` to ignore changes to the internal `replication_configuration` object. Failure to add the `lifecycle` configuration to the `aws_s3_bucket` will result in conflicting state results. ``` lifecycle { @@ -209,87 +213,114 @@ lifecycle { } ``` -The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the `aws_s3_bucket` resource: +The `aws_s3_bucket_replication_configuration` resource provides the following features that are not available in the [`aws_s3_bucket` resource](/docs/providers/aws/r/s3_bucket.html): * `replica_modifications` - Added to the `source_selection_criteria` configuration object [documented below](#source_selection_criteria) * `metrics` - Added to the `destination` configuration object [documented below](#metrics) * `replication_time` - Added to the `destination` configuration object [documented below](#replication_time) * `existing_object_replication` - Added to the replication rule object [documented below](#existing_object_replication) -Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) - +Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication). ## Argument Reference -The `replication_configuration` resource supports the following: +The following arguments are supported: * `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rules` - (Required) Specifies the rules managing the replication [documented below](#rules). 
+* `rule` - (Required) Set of configuration blocks describing the rules managing the replication [documented below](#rule). -### rules +### rule -~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rule` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. ~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. -The `rules` object supports the following: - -With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +The `rule` configuration block supports the following arguments: -* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). * `delete_marker_replication` - (Optional) Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when `filter` is used)[documented below](#delete_marker_replication). * `destination` - (Required) Specifies the destination for the rule [documented below](#destination). +* `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. -* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". -### existing_object_replication +### delete_marker_replication -~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication) +~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. -The `existing_object_replication` object supports the following: +~> **NOTE:** This argument is only available with V2 replication configurations. 
``` -existing_object_replication { +delete_marker_replication { status = "Enabled" } ``` -* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. +The `delete_marker_replication` configuration block supports the following arguments: +* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. -### delete_marker_replication +### destination -~> **NOTE:** This configuration format differs from that of `aws_s3_bucket`. +The `destination` configuration block supports the following arguments: -~> **NOTE:** This argument is only available with V2 replication configurations. +* `access_control_translation` - (Optional) A configuration block that specifies the overrides to use for object owners on replication [documented below](#access_control_translation). Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account` owner override configuration. +* `account` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. +* `encryption_configuration` - (Optional) A configuration block that provides information about encryption [documented below](#encryption_configuration). If `source_selection_criteria` is specified, you must specify this element. +* `metrics` - (Optional) A configuration block that specifies replication metrics-related settings enabling replication metrics and events [documented below](#metrics). +* `replication_time` - (Optional) A configuration block that specifies S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated [documented below](#replication_time). Replication Time Control must be used in conjunction with `metrics`. +* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. -The `delete_marker_replication` object supports the following: +### access_control_translation ``` -delete_marker_replication { +access_control_translation { + owner = "Destination" +} +``` + +The `access_control_translation` configuration block supports the following arguments: + +* `owner` - (Required) Specifies the replica ownership. For default and valid values, see [PUT bucket replication](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) in the Amazon S3 API Reference. Valid values: `Destination`. 
+ +### encryption_configuration + +``` +encryption_configuration { + replica_kms_key_id = "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" +} +``` + +The `encryption_configuration` configuration block supports the following arguments: + +* `replica_kms_key_id` - (Required) The ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. + +### metrics + +``` +metrics { + event_threshold { + minutes = 15 + } status = "Enabled" } ``` -* `status` - (Required) Whether delete markers should be replicated. Either `"Enabled"` or `"Disabled"`. +The `metrics` configuration block supports the following arguments: +* `event_threshold` - (Required) A configuration block that specifies the time threshold for emitting the `s3:Replication:OperationMissedThreshold` event [documented below](#event_threshold). +* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. -### destination -The `destination` object supports the following: +### event_threshold -* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the objects identified by the rule. -* `storage_class` - (Optional) The class of storage used to store the object. Can be `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, or `DEEP_ARCHIVE`. By default, Amazon S3 uses the storage class of the source object to create the object replica. -* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with - `sse_kms_encrypted_objects` source selection criteria. -* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with `account_id` owner override configuration. -* `account_id` - (Optional) The Account ID to specify the replica ownership. Must be used in conjunction with `access_control_translation` override configuration. -* `replication_time` - (Optional) Replication Time Control must be used in conjunction with `metrics` [documented below](#replication_time). -* `metrics` - (Optional) Metrics must be used in conjunction with `replication_time` [documented below](#metrics). +The `event_threshold` configuration block supports the following arguments: + +* `minutes` - (Required) Time in minutes. Valid values: `15`. ### replication_time @@ -302,34 +333,60 @@ replication_time { } ``` -The `replication_time` object supports the following: +The `replication_time` configuration block supports the following arguments: * `status` - (Required) The status of the Replication Time Control. Either `"Enabled"` or `"Disabled"`. -* `time` - (Required) The replication time `minutes` to be configured. The `minutes` value is expected to be an integer. +* `time` - (Required) A configuration block specifying the time by which replication should be complete for all objects and operations on objects [documented below](#time). -### metrics +### time + +The `time` configuration block supports the following arguments: + +* `minutes` - (Required) Time in minutes. Valid values: `15`. 
+ +### existing_object_replication + +~> **NOTE:** Replication for existing objects requires activation by AWS Support. See [userguide/replication-what-is-isnot-replicated](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-what-is-isnot-replicated.html#existing-object-replication). ``` -metrics { +existing_object_replication { status = "Enabled" - event_threshold { - minutes = 15 - } } ``` -The `metrics` object supports the following: +The `existing_object_replication` configuration block supports the following arguments: -* `status` - (Required) The status of the Destination Metrics. Either `"Enabled"` or `"Disabled"`. -* `event_threshold` - (Required) The time in `minutes` specifying the operation missed threshold event. The `minutes` value is expected to be an integer. +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. -### source_selection_criteria +### filter + +~> **NOTE:** With the `filter` argument, you must specify exactly one of `prefix`, `tag`, or `and`. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +The `filter` configuration block supports the following arguments: + +* `and` - (Optional) A configuration block for specifying rule filters. This element is required only if you specify more than one filter. See [and](#and) below for more details. +* `prefix` - (Optional) An object key name prefix that identifies a subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tag` - (Optional) A configuration block for specifying a tag key and value [documented below](#tag). + +### and + +The `and` configuration block supports the following arguments: + +* `prefix` - (Optional) An object key name prefix that identifies a subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional, Required if `prefix` is configured) A map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tag set. -The `source_selection_criteria` object supports the following: +### tag + +The `tag` configuration block supports the following arguments: + +* `key` - (Required) Name of the object key. +* `value` - (Required) Value of the tag. + +### source_selection_criteria ``` source_selection_criteria { - replica_modification { + replica_modifications { status = "Enabled" } sse_kms_encrypted_objects { @@ -338,28 +395,31 @@ source_selection_criteria { } } ``` - ~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the `aws_s3_bucket` resource. +The `source_selection_criteria` configuration block supports the following arguments: -* `replica_modifications` - (Optional) Keep object metadata such as tags, ACLs, and Object Lock settings replicated between - replicas and source objects. The `status` value is required to be either `"Enabled"` or `"Disabled"`. +~> **NOTE:** `sse_kms_encrypted_objects` configuration format differs here from the configuration in the [`aws_s3_bucket` resource](/docs/providers/aws/r/s3_bucket.html). -* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects (documented below). If specified, `replica_kms_key_id` - in `destination` must be specified as well. The `status` value is required to be either `"Enabled"` or `"Disabled"`.
+* `replica_modifications` - (Optional) A configuration block that you can specify for selections for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when `filter` is specified), you can specify this element and set the status to `Enabled` to replicate modifications on replicas. +* `sse_kms_encrypted_objects` - (Optional) A configuration block for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If specified, `replica_kms_key_id` in `destination` `encryption_configuration` must be specified as well. -### filter +### replica_modifications -The `filter` object supports the following: +The `replica_modifications` configuration block supports the following arguments: -* `prefix` - (Optional) Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. -* `tags` - (Optional) A map of tags that identifies subset of objects to which the rule applies. -The rule applies only to objects having all the tags in its tagset. +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. + +### sse_kms_encrypted_objects + +The `sse_kms_encrypted_objects` configuration block supports the following arguments: + +* `status` - (Required) Whether the existing objects should be replicated. Either `"Enabled"` or `"Disabled"`. ## Attributes Reference In addition to all arguments above, the following attributes are exported: -* id - Resource id is the s3 source bucket name. +* `id` - The S3 source bucket name. ## Import From e84b8adea5b87eff41d3154635e4e77a64775c3c Mon Sep 17 00:00:00 2001 From: Victor Paredes Date: Wed, 17 Nov 2021 22:58:35 -0500 Subject: [PATCH 263/304] Increase compatible_runtimes MaxItems value to 15. 
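Lambda's PublishLayerVersion API accepts up to 15 entries in CompatibleRuntimes, so the provider schema's previous cap of 5 rejected otherwise valid configurations. For illustration only (the layer name, filename, and runtime identifiers below are placeholder examples, not taken from this change), a configuration such as the following would previously have failed plan-time validation:

```terraform
resource "aws_lambda_layer_version" "example" {
  filename   = "lambda_layer.zip"
  layer_name = "example_layer"

  # Six runtimes: more than the old provider-side MaxItems of 5,
  # but within the service-side limit of 15 that the schema now mirrors.
  compatible_runtimes = [
    "python3.6",
    "python3.7",
    "python3.8",
    "python3.9",
    "nodejs12.x",
    "nodejs14.x",
  ]
}
```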
--- internal/service/lambda/layer_version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/lambda/layer_version.go b/internal/service/lambda/layer_version.go index 281f7cac55e6..2ba388fda8dd 100644 --- a/internal/service/lambda/layer_version.go +++ b/internal/service/lambda/layer_version.go @@ -74,7 +74,7 @@ func ResourceLayerVersion() *schema.Resource { Optional: true, ForceNew: true, MinItems: 0, - MaxItems: 5, + MaxItems: 15, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice(lambda.Runtime_Values(), false), From 557ff34cb75be68a135bb3cf7f3ccf0bab9b83fc Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 17 Nov 2021 22:59:08 -0500 Subject: [PATCH 264/304] retain Set hashing --- internal/service/s3/bucket_replication_configuration.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index ec27a3fdc4d0..4b577b43e79e 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -41,7 +41,7 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "rule": { Type: schema.TypeSet, Required: true, - //Set: rulesHash, + Set: rulesHash, MaxItems: 1000, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ From 86360fc18dd93c3dc5fa2e640eb99b12ca84d8d1 Mon Sep 17 00:00:00 2001 From: Victor Paredes Date: Thu, 18 Nov 2021 00:10:30 -0500 Subject: [PATCH 265/304] Add changelog for fix --- .changelog/21825.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21825.txt diff --git a/.changelog/21825.txt b/.changelog/21825.txt new file mode 100644 index 000000000000..421172538b8c --- /dev/null +++ b/.changelog/21825.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lambda_layer_version: Increase MaxItems for compatible_runtimes field to 15. +``` \ No newline at end of file From d49148bcfbc017f41e81a2611ee0796dea86afe7 Mon Sep 17 00:00:00 2001 From: ialidzhikov Date: Thu, 18 Nov 2021 12:58:02 +0200 Subject: [PATCH 266/304] r/route: Prevent giving up on route ready wait too early Signed-off-by: ialidzhikov --- internal/service/ec2/wait.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go index 87e537b36fe3..c9838b8784bc 100644 --- a/internal/service/ec2/wait.go +++ b/internal/service/ec2/wait.go @@ -22,6 +22,7 @@ const ( // General timeout for EC2 resource creations to propagate PropagationTimeout = 2 * time.Minute + RouteNotFoundChecks = 1000 // Should exceed any reasonable custom timeout value. RouteTableNotFoundChecks = 1000 // Should exceed any reasonable custom timeout value. RouteTableAssociationCreatedNotFoundChecks = 1000 // Should exceed any reasonable custom timeout value. 
) @@ -291,6 +292,7 @@ func WaitRouteReady(conn *ec2.EC2, routeFinder RouteFinder, routeTableID, destin Target: []string{RouteStatusReady}, Refresh: StatusRoute(conn, routeFinder, routeTableID, destination), Timeout: timeout, + NotFoundChecks: RouteNotFoundChecks, ContinuousTargetOccurence: 2, } From 402edc10972504ad3b07591fdb2de6602622cb33 Mon Sep 17 00:00:00 2001 From: ialidzhikov Date: Thu, 18 Nov 2021 13:17:28 +0200 Subject: [PATCH 267/304] Add CHANGELOG entry file Signed-off-by: ialidzhikov --- .changelog/21831.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21831.txt diff --git a/.changelog/21831.txt b/.changelog/21831.txt new file mode 100644 index 000000000000..72ad7294847c --- /dev/null +++ b/.changelog/21831.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_route: On route creation with a high custom creation timeout configured, the aws_route resource no longer gives up before the create timeout is exceeded (previously it gave up after 20 not-found checks). +``` \ No newline at end of file From f048ec6b03192b6982360aca66c82d33c2766614 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 17 Nov 2021 17:14:37 -0500 Subject: [PATCH 268/304] r/aws_iot_thing_group: First passing acceptance tests. Acceptance test output: % make testacc PKG_NAME=internal/service/iot TESTARGS='-run=TestAccIoTThingGroup_basic\|TestAccIoTThingGroup_disappears' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/iot/... -v -count 1 -parallel 20 -run=TestAccIoTThingGroup_basic\|TestAccIoTThingGroup_disappears -timeout 180m === RUN TestAccIoTThingGroup_basic === PAUSE TestAccIoTThingGroup_basic === RUN TestAccIoTThingGroup_disappears === PAUSE TestAccIoTThingGroup_disappears === CONT TestAccIoTThingGroup_basic === CONT TestAccIoTThingGroup_disappears --- PASS: TestAccIoTThingGroup_disappears (12.92s) --- PASS: TestAccIoTThingGroup_basic (17.35s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/iot 20.592s --- internal/service/iot/find.go | 2 +- internal/service/iot/thing_group.go | 282 ++++++++++++------- internal/service/iot/thing_group_test.go | 51 +++- website/docs/r/iot_thing_group.html.markdown | 20 +- 4 files changed, 242 insertions(+), 113 deletions(-) diff --git a/internal/service/iot/find.go b/internal/service/iot/find.go index aba3a4062954..2e5d0cf7f681 100644 --- a/internal/service/iot/find.go +++ b/internal/service/iot/find.go @@ -56,4 +56,4 @@ func FindThingGroupByName(conn *iot.IoT, name string) (*iot.DescribeThingGroupOu } return output, nil -} \ No newline at end of file +} diff --git a/internal/service/iot/thing_group.go b/internal/service/iot/thing_group.go index f77ebfefb0d5..c760a3294178 100644 --- a/internal/service/iot/thing_group.go +++ b/internal/service/iot/thing_group.go @@ -3,6 +3,7 @@ package iot import ( "fmt" "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" @@ -32,49 +33,20 @@ func ResourceThingGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - "parent_group_name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - "properties": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attributes": { - Type: schema.TypeMap, - Optional: true, - Elem:
&schema.Schema{Type: schema.TypeString}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, "metadata": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "creation_date": { - Type: schema.TypeInt, + Type: schema.TypeString, Computed: true, }, "parent_group_name": { Type: schema.TypeString, Computed: true, }, - "root_to_parent_groups": { + "root_to_parent_thing_groups": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ @@ -93,12 +65,50 @@ func ResourceThingGroup() *schema.Resource { }, }, }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "parent_group_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "properties": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_payload": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attributes": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), "version": { Type: schema.TypeInt, Computed: true, }, - "tags": tftags.TagsSchema(), - "tags_all": tftags.TagsSchemaComputed(), }, CustomizeDiff: verify.SetTagsDiff, @@ -110,15 +120,17 @@ func resourceThingGroupCreate(d *schema.ResourceData, meta interface{}) error { defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) + name := d.Get("name").(string) input := &iot.CreateThingGroupInput{ - ThingGroupName: aws.String(d.Get("name").(string)), + ThingGroupName: aws.String(name), } if v, ok := d.GetOk("parent_group_name"); ok { input.ParentGroupName = aws.String(v.(string)) } - if v, ok := d.GetOk("properties"); ok { - input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) + + if v, ok := d.GetOk("properties"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ThingGroupProperties = expandThingGroupProperties(v.([]interface{})[0].(map[string]interface{})) } if len(tags) > 0 { @@ -126,12 +138,14 @@ func resourceThingGroupCreate(d *schema.ResourceData, meta interface{}) error { } log.Printf("[DEBUG] Creating IoT Thing Group: %s", input) - out, err := conn.CreateThingGroup(input) + output, err := conn.CreateThingGroup(input) + if err != nil { - return err + return fmt.Errorf("error creating IoT Thing Group (%s): %w", name, err) } - d.SetId(aws.StringValue(out.ThingGroupName)) + d.SetId(aws.StringValue(output.ThingGroupName)) + return resourceThingGroupRead(d, meta) } @@ -155,11 +169,25 @@ func resourceThingGroupRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", output.ThingGroupArn) d.Set("name", output.ThingGroupName) - if err := d.Set("metadata", flattenIotThingGroupMetadata(output.ThingGroupMetadata)); err != nil { - return fmt.Errorf("error setting metadata: %s", err) + if output.ThingGroupMetadata != nil { + if err := d.Set("metadata", []interface{}{flattenThingGroupMetadata(output.ThingGroupMetadata)}); err != nil { + return fmt.Errorf("error setting metadata: %w", err) + } + } else { + d.Set("metadata", nil) + } + if v := 
flattenThingGroupProperties(output.ThingGroupProperties); len(v) > 0 { + if err := d.Set("properties", []interface{}{}); err != nil { + return fmt.Errorf("error setting properties: %w", err) + } + } else { + d.Set("properties", nil) } - if err := d.Set("properties", flattenIotThingGroupProperties(output.ThingGroupProperties)); err != nil { - return fmt.Errorf("error setting properties: %s", err) + + if output.ThingGroupMetadata != nil { + d.Set("parent_group_name", output.ThingGroupMetadata.ParentGroupName) + } else { + d.Set("parent_group_name", nil) } d.Set("version", output.Version) @@ -185,22 +213,24 @@ func resourceThingGroupRead(d *schema.ResourceData, meta interface{}) error { func resourceThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - input := &iot.UpdateThingGroupInput{ - ThingGroupName: aws.String(d.Get("name").(string)), - } + if d.HasChangesExcept("tags", "tags_all") { + input := &iot.UpdateThingGroupInput{ + ThingGroupName: aws.String(d.Get("name").(string)), + } - if v, ok := d.GetOk("properties"); ok { - input.ThingGroupProperties = expandIotThingsGroupProperties(v.([]interface{})) - } + if v, ok := d.GetOk("properties"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ThingGroupProperties = expandThingGroupProperties(v.([]interface{})[0].(map[string]interface{})) + } - _, err := conn.UpdateThingGroup(input) - if err != nil { - return err + _, err := conn.UpdateThingGroup(input) + + if err != nil { + return fmt.Errorf("error updating IoT Thing Group (%s): %w", d.Id(), err) + } } if d.HasChange("tags_all") { o, n := d.GetChange("tags_all") - if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { return fmt.Errorf("error updating tags: %s", err) } @@ -212,80 +242,140 @@ func resourceThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { func resourceThingGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - input := &iot.DeleteThingGroupInput{ + log.Printf("[DEBUG] Deleting IoT Thing Group: %s", d.Id()) + _, err := conn.DeleteThingGroup(&iot.DeleteThingGroupInput{ ThingGroupName: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + return nil } - log.Printf("[DEBUG] Deleting IoT Thing Group: %s", input) - _, err := conn.DeleteThingGroup(input) if err != nil { - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return nil - } - return err + return fmt.Errorf("error deleting IoT Thing Group (%s): %w", d.Id(), err) } return nil } -func expandIotThingsGroupProperties(l []interface{}) *iot.ThingGroupProperties { - m := l[0].(map[string]interface{}) +func expandThingGroupProperties(tfMap map[string]interface{}) *iot.ThingGroupProperties { + if tfMap == nil { + return nil + } - thingGroupProperties := &iot.ThingGroupProperties{} + apiObject := &iot.ThingGroupProperties{} - if v, ok := m["attributes"]; ok { - thingGroupProperties.AttributePayload = &iot.AttributePayload{ - Attributes: flex.ExpandStringMap(v.(map[string]interface{})), - } + if v, ok := tfMap["attribute_payload"].([]interface{}); ok && len(v) > 0 { + apiObject.AttributePayload = expandAttributePayload(v[0].(map[string]interface{})) } - if v, ok := m["description"]; ok { - thingGroupProperties.ThingGroupDescription = aws.String(v.(string)) + if v, ok := tfMap["description"].(string); ok && v != "" { + apiObject.ThingGroupDescription = aws.String(v) } - return thingGroupProperties + return 
apiObject } -func flattenIotThingGroupProperties(properties *iot.ThingGroupProperties) []map[string]interface{} { - if properties == nil { - return []map[string]interface{}{} +func expandAttributePayload(tfMap map[string]interface{}) *iot.AttributePayload { + if tfMap == nil { + return nil + } + + apiObject := &iot.AttributePayload{} + + if v, ok := tfMap["attributes"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.Attributes = flex.ExpandStringMap(v) + } + + return apiObject +} + +func flattenThingGroupMetadata(apiObject *iot.ThingGroupMetadata) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.CreationDate; v != nil { + tfMap["creation_date"] = aws.TimeValue(v).Format(time.RFC3339) } - props := map[string]interface{}{ - "description": aws.StringValue(properties.ThingGroupDescription), + if v := apiObject.ParentGroupName; v != nil { + tfMap["parent_group_name"] = aws.StringValue(v) } - if properties.AttributePayload != nil { - props["attributes"] = aws.StringValueMap(properties.AttributePayload.Attributes) + if v := apiObject.RootToParentThingGroups; v != nil { + tfMap["root_to_parent_groups"] = flattenGroupNameAndArns(v) } - return []map[string]interface{}{props} + return tfMap } -func flattenIotThingGroupMetadata(metadata *iot.ThingGroupMetadata) []map[string]interface{} { - if metadata == nil { - return []map[string]interface{}{} +func flattenGroupNameAndArn(apiObject *iot.GroupNameAndArn) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.GroupArn; v != nil { + tfMap["group_arn"] = aws.StringValue(v) } - meta := map[string]interface{}{ - "creation_date": aws.TimeValue(metadata.CreationDate).Unix(), - "parent_group_name": aws.StringValue(metadata.ParentGroupName), - "root_to_parent_groups": expandIotGroupNameAndArnList(metadata.RootToParentThingGroups), + if v := apiObject.GroupName; v != nil { + tfMap["group_name"] = aws.StringValue(v) } - return []map[string]interface{}{meta} + return tfMap } -func expandIotGroupNameAndArnList(lgn []*iot.GroupNameAndArn) []*iot.GroupNameAndArn { - vs := make([]*iot.GroupNameAndArn, 0, len(lgn)) - for _, v := range lgn { - val, ok := interface{}(v).(iot.GroupNameAndArn) - if ok { - vs = append(vs, &iot.GroupNameAndArn{ - GroupName: val.GroupName, - GroupArn: val.GroupArn, - }) +func flattenGroupNameAndArns(apiObjects []*iot.GroupNameAndArn) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue } + + tfList = append(tfList, flattenGroupNameAndArn(apiObject)) } - return vs + + return tfList +} + +func flattenThingGroupProperties(apiObject *iot.ThingGroupProperties) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := flattenAttributePayload(apiObject.AttributePayload); len(v) > 0 { + tfMap["attribute_payload"] = []interface{}{v} + } + + if v := apiObject.ThingGroupDescription; v != nil { + tfMap["description"] = aws.StringValue(v) + } + + return tfMap +} + +func flattenAttributePayload(apiObject *iot.AttributePayload) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Attributes; v != nil { + tfMap["attributes"] = aws.StringValueMap(v) + } + + return tfMap } diff --git a/internal/service/iot/thing_group_test.go 
b/internal/service/iot/thing_group_test.go index 685ba4d05dcb..0bee569b4513 100644 --- a/internal/service/iot/thing_group_test.go +++ b/internal/service/iot/thing_group_test.go @@ -14,10 +14,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestAccIoTThingGroup_base(t *testing.T) { +func TestAccIoTThingGroup_basic(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -27,15 +26,18 @@ func TestAccIoTThingGroup_base(t *testing.T) { CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { - Config: testAccThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), + resource.TestCheckResourceAttr(resourceName, "metadata.0.parent_group_name", ""), + resource.TestCheckResourceAttr(resourceName, "metadata.0.root_to_parent_thing_groups.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), + resource.TestCheckResourceAttr(resourceName, "properties.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, @@ -48,6 +50,29 @@ func TestAccIoTThingGroup_base(t *testing.T) { }) } +func TestAccIoTThingGroup_disappears(t *testing.T) { + var thingGroup iot.DescribeThingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccThingGroupConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIotThingGroupExists(resourceName, &thingGroup), + acctest.CheckResourceDisappears(acctest.Provider, tfiot.ResourceThingGroup(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccIoTThingGroup_full(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput rString := sdkacctest.RandString(8) @@ -468,6 +493,14 @@ func testAccCheckThingGroupDestroy(s *terraform.State) error { return nil } +func testAccThingGroupConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = %[1]q +} +`, rName) +} + func testAccThingGroupConfig_base(thingGroupName string) string { return fmt.Sprintf(` resource "aws_iot_thing_group" "test" { diff --git a/website/docs/r/iot_thing_group.html.markdown b/website/docs/r/iot_thing_group.html.markdown index 0423bc30f762..bcb0d7115c59 100644 --- 
a/website/docs/r/iot_thing_group.html.markdown +++ b/website/docs/r/iot_thing_group.html.markdown @@ -3,12 +3,12 @@ subcategory: "IoT" layout: "aws" page_title: "AWS: aws_iot_thing_group" description: |- - Creates and manages an AWS IoT Thing Group. + Manages an AWS IoT Thing Group. --- # Resource: aws_iot_thing_group -Creates and manages an AWS IoT Thing Group. +Manages an AWS IoT Thing Group. ## Example Usage @@ -23,9 +23,11 @@ resource "aws_iot_thing_group" "example" { parent_group_name = aws_iot_thing_group.parent.name properties { - attributes = { - One = "11111" - Two = "TwoTwo" + attribute_payload { + attributes = { + One = "11111" + Two = "TwoTwo" + } } description = "This is my thing group" } @@ -45,16 +47,20 @@ resource "aws_iot_thing_group" "example" { ## properties Reference -* `attributes` - (Optional) Map of attributes of the Thing Group. +* `attribute_payload` - (Optional) The Thing Group attributes. Defined below. * `description` - (Optional) A description of the Thing Group. +## attribute_payload Reference + +* `attributes` - (Optional) Key-value map. + ## Attributes Reference In addition to the arguments above, the following attributes are exported: +* `arn` - The ARN of the Thing Group. * `id` - The Thing Group ID. * `version` - The current version of the Thing Group record in the registry. -* `arn` - The ARN of the Thing Group. ## Import From f19787c7b164614c718ffe67f1a611eaae35001d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 12:22:52 -0500 Subject: [PATCH 269/304] r/aws_iot_thing_group: Passing acceptance tests. Acceptance test output: % make testacc PKG_NAME=internal/service/iot TESTARGS='-run=TestAccIoTThingGroup_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/iot/... 
-v -count 1 -parallel 20 -run=TestAccIoTThingGroup_ -timeout 180m === RUN TestAccIoTThingGroup_basic === PAUSE TestAccIoTThingGroup_basic === RUN TestAccIoTThingGroup_disappears === PAUSE TestAccIoTThingGroup_disappears === RUN TestAccIoTThingGroup_tags === PAUSE TestAccIoTThingGroup_tags === RUN TestAccIoTThingGroup_parentGroup === PAUSE TestAccIoTThingGroup_parentGroup === RUN TestAccIoTThingGroup_properties === PAUSE TestAccIoTThingGroup_properties === CONT TestAccIoTThingGroup_basic === CONT TestAccIoTThingGroup_parentGroup === CONT TestAccIoTThingGroup_tags === CONT TestAccIoTThingGroup_disappears === CONT TestAccIoTThingGroup_properties --- PASS: TestAccIoTThingGroup_disappears (12.03s) --- PASS: TestAccIoTThingGroup_basic (16.63s) --- PASS: TestAccIoTThingGroup_parentGroup (22.57s) --- PASS: TestAccIoTThingGroup_properties (27.33s) --- PASS: TestAccIoTThingGroup_tags (37.49s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/iot 40.876s --- internal/service/iot/thing_group.go | 38 +- internal/service/iot/thing_group_test.go | 443 +++++------------------ 2 files changed, 119 insertions(+), 362 deletions(-) diff --git a/internal/service/iot/thing_group.go b/internal/service/iot/thing_group.go index c760a3294178..8b07a9f96467 100644 --- a/internal/service/iot/thing_group.go +++ b/internal/service/iot/thing_group.go @@ -46,7 +46,7 @@ func ResourceThingGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "root_to_parent_thing_groups": { + "root_to_parent_groups": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ @@ -86,6 +86,7 @@ func ResourceThingGroup() *schema.Resource { "attribute_payload": { Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "attributes": { @@ -115,6 +116,10 @@ func ResourceThingGroup() *schema.Resource { } } +const ( + thingGroupDeleteTimeout = 1 * time.Minute +) + func resourceThingGroupCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig @@ -177,7 +182,7 @@ func resourceThingGroupRead(d *schema.ResourceData, meta interface{}) error { d.Set("metadata", nil) } if v := flattenThingGroupProperties(output.ThingGroupProperties); len(v) > 0 { - if err := d.Set("properties", []interface{}{}); err != nil { + if err := d.Set("properties", []interface{}{v}); err != nil { return fmt.Errorf("error setting properties: %w", err) } } else { @@ -215,11 +220,22 @@ func resourceThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { if d.HasChangesExcept("tags", "tags_all") { input := &iot.UpdateThingGroupInput{ - ThingGroupName: aws.String(d.Get("name").(string)), + ExpectedVersion: aws.Int64(int64(d.Get("version").(int))), + ThingGroupName: aws.String(d.Get("name").(string)), } if v, ok := d.GetOk("properties"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.ThingGroupProperties = expandThingGroupProperties(v.([]interface{})[0].(map[string]interface{})) + } else { + input.ThingGroupProperties = &iot.ThingGroupProperties{} + } + + // https://docs.aws.amazon.com/iot/latest/apireference/API_AttributePayload.html#API_AttributePayload_Contents: + // "To remove an attribute, call UpdateThing with an empty attribute value." 
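+ // Note (editorial assumption, not from the patch): for thing groups the analogous call is UpdateThingGroup; per the doc quoted above, sending an explicit empty Attributes map clears previously stored attributes, while omitting the payload entirely would leave them unchanged, which is why an empty map is defaulted in below.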
+ if input.ThingGroupProperties.AttributePayload == nil { + input.ThingGroupProperties.AttributePayload = &iot.AttributePayload{ + Attributes: map[string]*string{}, + } } _, err := conn.UpdateThingGroup(input) @@ -243,9 +259,19 @@ func resourceThingGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn log.Printf("[DEBUG] Deleting IoT Thing Group: %s", d.Id()) - _, err := conn.DeleteThingGroup(&iot.DeleteThingGroupInput{ - ThingGroupName: aws.String(d.Id()), - }) + _, err := tfresource.RetryWhen(thingGroupDeleteTimeout, + func() (interface{}, error) { + return conn.DeleteThingGroup(&iot.DeleteThingGroupInput{ + ThingGroupName: aws.String(d.Id()), + }) + }, + func(err error) (bool, error) { + if tfawserr.ErrMessageContains(err, iot.ErrCodeInvalidRequestException, "there are still child groups attached") { + return true, err + } + + return false, err + }) if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { return nil diff --git a/internal/service/iot/thing_group_test.go b/internal/service/iot/thing_group_test.go index 0bee569b4513..1ea3ff8d2676 100644 --- a/internal/service/iot/thing_group_test.go +++ b/internal/service/iot/thing_group_test.go @@ -2,6 +2,7 @@ package iot_test import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/service/iot" @@ -29,7 +30,7 @@ func TestAccIoTThingGroup_basic(t *testing.T) { Config: testAccThingGroupConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttrSet(resourceName, "arn"), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "iot", regexp.MustCompile(fmt.Sprintf("thinggroup/%s$", rName))), resource.TestCheckResourceAttr(resourceName, "metadata.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), resource.TestCheckResourceAttr(resourceName, "metadata.0.parent_group_name", ""), @@ -38,7 +39,7 @@ func TestAccIoTThingGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), resource.TestCheckResourceAttr(resourceName, "properties.#", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttrSet(resourceName, "version"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { @@ -73,11 +74,9 @@ func TestAccIoTThingGroup_disappears(t *testing.T) { }) } -func TestAccIoTThingGroup_full(t *testing.T) { +func TestAccIoTThingGroup_tags(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) - parentThingGroupName := thingGroupName + "_parent" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -86,76 +85,12 @@ func TestAccIoTThingGroup_full(t *testing.T) { Providers: acctest.Providers, CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ - { // BASE - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttrSet(resourceName, 
"metadata.0.creation_date"), - resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "version"), - ), - }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { // UPDATE full - Config: testAccThingGroupConfig_full(thingGroupName, parentThingGroupName, "7", "this is my thing group", "myTag"), + Config: testAccThingGroupConfigTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "7"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my thing group"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "myTag"), - resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), - resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "version"), - ), - }, - { // DELETE full - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), - resource.TestCheckResourceAttrSet(resourceName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "version"), - ), - }, - }, - }) -} - -func TestAccIoTThingGroup_name(t *testing.T) { - var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) - resourceName := "aws_iot_thing_group.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckThingGroupDestroy, - Steps: []resource.TestStep{ - { // CREATE - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "name", thingGroupName), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, { @@ -163,132 +98,33 @@ func TestAccIoTThingGroup_name(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - { // UPDATE - Config: testAccThingGroupConfig_base(thingGroupName + "_updated"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "name", thingGroupName+"_updated"), - ), - }, - }, - }) -} - -func TestAccIoTThingGroup_tags(t *testing.T) { - var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := 
fmt.Sprintf("tf_acc_thing_group_%s", rString) - resourceName := "aws_iot_thing_group.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckThingGroupDestroy, - Steps: []resource.TestStep{ - { // BASE - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { // CREATE Tags - Config: testAccThingGroupConfig_withTags(thingGroupName, "myTag"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "myTag"), - ), - }, - { // UPDATE Tags - Config: testAccThingGroupConfig_withTags(thingGroupName, "myUpdatedTag"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.tagKey", "myUpdatedTag"), - ), - }, - { // DELETE Tags - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, - }, - }) -} - -func TestAccIoTThingGroup_propsAttr(t *testing.T) { - var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) - resourceName := "aws_iot_thing_group.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckThingGroupDestroy, - Steps: []resource.TestStep{ - { // BASE - Config: testAccThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfigTags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { // CREATE Properties - Config: testAccThingGroupConfig_withPropAttr(thingGroupName, "42"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "42"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", ""), - ), - }, - { // UPDATE Properties - Config: testAccThingGroupConfig_withPropAttr(thingGroupName, "7"), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "7"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", ""), - ), - }, - { // DELETE Properties - Config: testAccThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfigTags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, }, }) } -func TestAccIoTThingGroup_propsDesc(t *testing.T) { +func TestAccIoTThingGroup_parentGroup(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" + parentResourceName := "aws_iot_thing_group.parent" + grandparentResourceName := "aws_iot_thing_group.grandparent" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -296,62 +132,18 @@ func TestAccIoTThingGroup_propsDesc(t *testing.T) { Providers: acctest.Providers, CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ - { // BASE - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - ), - }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { // CREATE Properties - Config: testAccThingGroupConfig_withPropDesc(thingGroupName, "this is my thing group"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties.0.attributes"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my thing group"), - ), - }, - { // UPDATE Properties - Config: testAccThingGroupConfig_withPropDesc(thingGroupName, "this is my updated thing group"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties.0.attributes"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my updated thing group"), - ), - }, - { // DELETE Properties - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - ), - }, - }, - }) -} - -func TestAccIoTThingGroup_propsAll(t *testing.T) { - var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) - resourceName := "aws_iot_thing_group.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) 
}, - ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckThingGroupDestroy, - Steps: []resource.TestStep{ - { // BASE - Config: testAccThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfigParentGroup(rName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), + resource.TestCheckResourceAttrPair(resourceName, "parent_group_name", parentResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "metadata.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "metadata.0.parent_group_name", parentResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "metadata.0.root_to_parent_groups.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "metadata.0.root_to_parent_groups.0.group_arn", grandparentResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "metadata.0.root_to_parent_groups.0.group_name", grandparentResourceName, "name"), + resource.TestCheckResourceAttrPair(resourceName, "metadata.0.root_to_parent_groups.1.group_arn", parentResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "metadata.0.root_to_parent_groups.1.group_name", parentResourceName, "name"), ), }, { @@ -359,46 +151,13 @@ func TestAccIoTThingGroup_propsAll(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - { // CREATE Properties - Config: testAccThingGroupConfig_withPropAll(thingGroupName, "42", "this is my thing group"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "42"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my thing group"), - ), - }, - { // UPDATE Properties - Config: testAccThingGroupConfig_withPropAll(thingGroupName, "7", "this is my updated thing group"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.%", "3"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.One", "11111"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Two", "TwoTwo"), - resource.TestCheckResourceAttr(resourceName, "properties.0.attributes.Answer", "7"), - resource.TestCheckResourceAttr(resourceName, "properties.0.description", "this is my updated thing group"), - ), - }, - { // DELETE Properties - Config: testAccThingGroupConfig_base(thingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "properties"), - ), - }, }, }) } -func TestAccIoTThingGroup_parent(t *testing.T) { +func TestAccIoTThingGroup_properties(t *testing.T) { var thingGroup iot.DescribeThingGroupOutput - rString := sdkacctest.RandString(8) - thingGroupName := fmt.Sprintf("tf_acc_thing_group_%s", rString) - parentThingGroupName := thingGroupName + "_parent" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -408,10 +167,15 @@ func TestAccIoTThingGroup_parent(t *testing.T) { CheckDestroy: testAccCheckThingGroupDestroy, Steps: []resource.TestStep{ { - Config: testAccThingGroupConfig_base(thingGroupName), + Config: testAccThingGroupConfigProperties(rName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckNoResourceAttr(resourceName, "parent_group_name"), + resource.TestCheckResourceAttr(resourceName, "properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.#", "1"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.%", "1"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "test description 1"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { @@ -419,25 +183,17 @@ func TestAccIoTThingGroup_parent(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - { // CREATE parent_group_name - Config: testAccThingGroupConfig_withParent(thingGroupName, parentThingGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName), - ), - }, - { // UPDATE parent_group_name - Config: testAccThingGroupConfig_withParent(thingGroupName, parentThingGroupName+"_updated"), - Check: resource.ComposeTestCheckFunc( - testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", parentThingGroupName+"_updated"), - ), - }, - { // DELETE parent_group_name - Config: testAccThingGroupConfig_base(thingGroupName), + { + Config: testAccThingGroupConfigPropertiesUpdated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckIotThingGroupExists(resourceName, &thingGroup), - resource.TestCheckResourceAttr(resourceName, "parent_group_name", ""), + resource.TestCheckResourceAttr(resourceName, "properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.#", "1"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.%", "2"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.Key2", "Value2"), + resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.Key3", "Value3"), + resource.TestCheckResourceAttr(resourceName, "properties.0.description", "test description 2"), + resource.TestCheckResourceAttr(resourceName, "version", "2"), ), }, }, @@ -501,109 +257,84 @@ resource "aws_iot_thing_group" "test" { `, rName) } -func testAccThingGroupConfig_base(thingGroupName string) string { +func testAccThingGroupConfigTags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` resource "aws_iot_thing_group" "test" { - name = "%s" -} -`, thingGroupName) -} - -func testAccThingGroupConfig_full(thingGroupName, parentThingGroupName, answer, description, tagValue string) string { - return fmt.Sprintf(` -resource "aws_iot_thing_group" "parent" { - name = "%s" -} - -resource "aws_iot_thing_group" "test" { - name = "%s" - parent_group_name = aws_iot_thing_group.parent.name - - properties { - attributes = { - One = "11111" - Two = 
"TwoTwo" - Answer = "%s" - } - description = "%s" - } + name = %[1]q tags = { - tagKey = "%s" + %[2]q = %[3]q } } -`, parentThingGroupName, thingGroupName, answer, description, tagValue) +`, rName, tagKey1, tagValue1) } -func testAccThingGroupConfig_withTags(thingGroupName, tagValue string) string { +func testAccThingGroupConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return fmt.Sprintf(` resource "aws_iot_thing_group" "test" { - name = "%s" + name = %[1]q tags = { - tagKey = "%s" + %[2]q = %[3]q + %[4]q = %[5]q } } -`, thingGroupName, tagValue) +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) } -func testAccThingGroupConfig_withPropAttr(thingGroupName, answer string) string { +func testAccThingGroupConfigParentGroup(rName string) string { return fmt.Sprintf(` -resource "aws_iot_thing_group" "test" { - name = "%s" +resource "aws_iot_thing_group" "grandparent" { + name = "%[1]s-grandparent" +} - properties { - attributes = { - One = "11111" - Two = "TwoTwo" - Answer = "%s" - } - } +resource "aws_iot_thing_group" "parent" { + name = "%[1]s-parent" -} -`, thingGroupName, answer) + parent_group_name = aws_iot_thing_group.grandparent.name } -func testAccThingGroupConfig_withPropDesc(thingGroupName, description string) string { - return fmt.Sprintf(` resource "aws_iot_thing_group" "test" { - name = "%s" - - properties { - description = "%s" - } + name = %[1]q + parent_group_name = aws_iot_thing_group.parent.name } -`, thingGroupName, description) +`, rName) } -func testAccThingGroupConfig_withPropAll(thingGroupName, answer, description string) string { +func testAccThingGroupConfigProperties(rName string) string { return fmt.Sprintf(` resource "aws_iot_thing_group" "test" { - name = "%s" + name = %[1]q properties { - attributes = { - One = "11111" - Two = "TwoTwo" - Answer = "%s" + attribute_payload { + attributes = { + Key1 = "Value1" + } } - description = "%s" - } + description = "test description 1" + } } -`, thingGroupName, answer, description) +`, rName) } -func testAccThingGroupConfig_withParent(thingGroupName, parentThingGroupName string) string { +func testAccThingGroupConfigPropertiesUpdated(rName string) string { return fmt.Sprintf(` -resource "aws_iot_thing_group" "parent" { - name = "%s" -} - resource "aws_iot_thing_group" "test" { - name = "%s" - parent_group_name = aws_iot_thing_group.parent.name + name = %[1]q + + properties { + attribute_payload { + attributes = { + Key2 = "Value2" + Key3 = "Value3" + } + } + + description = "test description 2" + } } -`, parentThingGroupName, thingGroupName) +`, rName) } From f299a17d67389572c9038e073c7d884a93c7fafe Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 12:24:02 -0500 Subject: [PATCH 270/304] Add CHANGELOG entry. --- .changelog/21799.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21799.txt diff --git a/.changelog/21799.txt b/.changelog/21799.txt new file mode 100644 index 000000000000..779e4e16e5ef --- /dev/null +++ b/.changelog/21799.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_iot_thing_group +``` \ No newline at end of file From 5a8d6a8b66498c7f5a262fdbb3d5303bbc3aa24d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 12:34:50 -0500 Subject: [PATCH 271/304] Fix typo. 
--- internal/service/s3control/sweep.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index c8f712cd7045..16f5699a06df 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -61,7 +61,7 @@ func sweepAccessPoints(region string) error { } if err != nil { - return fmt.Errorf("error listing SS3 Access Points (%s): %w", region, err) + return fmt.Errorf("error listing S3 Access Points (%s): %w", region, err) } err = sweep.SweepOrchestrator(sweepResources) From 5907c0c0bbb3a8c779216c5409092fa5da9aa375 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 12:36:05 -0500 Subject: [PATCH 272/304] r/aws_iot_thing_group: Add sweeper. % make sweep SWEEPARGS=-sweep-run=aws_iot_thing_group SWEEP=us-west-2,us-west-1,us-east-2,us-east-1 WARNING: This will destroy infrastructure. Use only in development accounts. go test ./internal/sweep -v -tags=sweep -sweep=us-west-2,us-west-1,us-east-2,us-east-1 -sweep-run=aws_iot_thing_group -timeout 60m 2021/11/18 12:33:56 [DEBUG] Running Sweepers for region (us-west-2): 2021/11/18 12:33:56 [DEBUG] Running Sweeper (aws_iot_policy_attachment) in region (us-west-2) 2021/11/18 12:33:56 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/18 12:33:56 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:33:57 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-6195811425314339175 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-5646206226315228557-grandparent 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-8885168906862677607-grandparent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-4304961202836791216-parent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-4304961202836791216-grandparent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-5271273544858748970 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-1002360090986675451 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-2131691615459568451-parent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: 
[success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-3663987710321595284-grandparent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-8885676072754895414 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-1750511564291611981 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-3663987710321595284-parent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-4237496326368319094-grandparent 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-2131691615459568451-grandparent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-4237496326368319094-parent 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [DEBUG] Deleting IoT Thing Group: tf-acc-test-3339897176340297276 2021/11/18 12:33:58 [DEBUG] Waiting for state to become: [success] 2021/11/18 12:33:58 [TRACE] Waiting 500ms before next try 2021/11/18 12:33:58 [TRACE] Waiting 500ms before next try 2021/11/18 12:33:58 [TRACE] Waiting 500ms before next try 2021/11/18 12:33:58 [TRACE] Waiting 500ms before next try 2021/11/18 12:34:00 [DEBUG] Completed Sweeper (aws_iot_policy_attachment) in region (us-west-2) in 3.72332801s 2021/11/18 12:34:00 Completed Sweepers for region (us-west-2) in 3.723518398s 2021/11/18 12:34:00 Sweeper Tests for region (us-west-2) ran successfully: - aws_iot_policy_attachment 2021/11/18 12:34:00 [DEBUG] Running Sweepers for region (us-west-1): 2021/11/18 12:34:00 [DEBUG] Running Sweeper (aws_iot_policy_attachment) in region (us-west-1) 2021/11/18 12:34:00 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/18 12:34:00 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:34:00 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:34:02 [DEBUG] Completed Sweeper (aws_iot_policy_attachment) in region (us-west-1) in 2.318537361s 2021/11/18 12:34:02 Completed Sweepers for region (us-west-1) in 2.318572572s 2021/11/18 12:34:02 Sweeper Tests for region (us-west-1) ran successfully: - aws_iot_policy_attachment 2021/11/18 12:34:02 [DEBUG] Running Sweepers for region (us-east-2): 2021/11/18 12:34:02 [DEBUG] Running Sweeper (aws_iot_policy_attachment) in region (us-east-2) 2021/11/18 12:34:02 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/18 12:34:02 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:34:03 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:34:03 [DEBUG] Completed Sweeper (aws_iot_policy_attachment) in region (us-east-2) in 879.478468ms 2021/11/18 12:34:03 Completed Sweepers for region (us-east-2) in 879.504201ms 2021/11/18 12:34:03 Sweeper Tests for region (us-east-2) ran successfully: - aws_iot_policy_attachment 2021/11/18 12:34:03 [DEBUG] Running Sweepers for region (us-east-1): 2021/11/18 
12:34:03 [DEBUG] Running Sweeper (aws_iot_policy_attachment) in region (us-east-1) 2021/11/18 12:34:03 [INFO] AWS Auth provider used: "EnvProvider" 2021/11/18 12:34:03 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:34:03 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2021/11/18 12:34:04 [DEBUG] Completed Sweeper (aws_iot_policy_attachment) in region (us-east-1) in 507.701903ms 2021/11/18 12:34:04 Completed Sweepers for region (us-east-1) in 507.729908ms 2021/11/18 12:34:04 Sweeper Tests for region (us-east-1) ran successfully: - aws_iot_policy_attachment ok github.com/hashicorp/terraform-provider-aws/internal/sweep 13.481s --- internal/service/iot/sweep.go | 48 +++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/internal/service/iot/sweep.go b/internal/service/iot/sweep.go index 1d0e4c0c1ea1..2735948cafaf 100644 --- a/internal/service/iot/sweep.go +++ b/internal/service/iot/sweep.go @@ -55,6 +55,11 @@ func init() { Dependencies: []string{"aws_iot_thing_principal_attachment"}, }) + resource.AddTestSweepers("aws_iot_thing_group", &resource.Sweeper{ + Name: "aws_iot_thing_group", + F: sweepThingGroups, + }) + resource.AddTestSweepers("aws_iot_thing_type", &resource.Sweeper{ Name: "aws_iot_thing_type", F: sweepThingTypes, @@ -475,3 +480,46 @@ func sweepTopicRules(region string) error { return sweeperErrs.ErrorOrNil() } + +func sweepThingGroups(region string) error { + client, err := sweep.SharedRegionalSweepClient(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*conns.AWSClient).IoTConn + input := &iot.ListThingGroupsInput{} + sweepResources := make([]*sweep.SweepResource, 0) + + err = conn.ListThingGroupsPages(input, func(page *iot.ListThingGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, group := range page.ThingGroups { + r := ResourceThingGroup() + d := r.Data(nil) + d.SetId(aws.StringValue(group.GroupName)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + + return !lastPage + }) + + if sweep.SkipSweepError(err) { + log.Printf("[WARN] Skipping IoT Thing Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing IoT Thing Groups (%s): %w", region, err) + } + + err = sweep.SweepOrchestrator(sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping IoT Thing Groups (%s): %w", region, err) + } + + return nil +} From 7090d2ea410478f1ff8726cc0a25387e0187eb24 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 12:37:38 -0500 Subject: [PATCH 273/304] Revert "Fix typo." This reverts commit 08b7fe6d57aa71455c8f7dde55515806285c59fe.
--- internal/service/s3control/sweep.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index 16f5699a06df..c8f712cd7045 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -61,7 +61,7 @@ func sweepAccessPoints(region string) error { } if err != nil { - return fmt.Errorf("error listing S3 Access Points (%s): %w", region, err) + return fmt.Errorf("error listing SS3 Access Points (%s): %w", region, err) } err = sweep.SweepOrchestrator(sweepResources) From c4b4a76c3a3b5ddfe6e6815fb13532b4d06333c2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 12:46:44 -0500 Subject: [PATCH 274/304] r/aws_iot_thing_group_membership: Tidy up documentation. --- .../iot_thing_group_membership.html.markdown | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/website/docs/r/iot_thing_group_membership.html.markdown b/website/docs/r/iot_thing_group_membership.html.markdown index 4a4348406e8d..e743fba85236 100644 --- a/website/docs/r/iot_thing_group_membership.html.markdown +++ b/website/docs/r/iot_thing_group_membership.html.markdown @@ -1,33 +1,35 @@ --- +subcategory: "IoT" layout: "aws" page_title: "AWS: aws_iot_thing_group_membership" description: |- - Allow to add IoT Thing to IoT Thing Group. + Adds an IoT Thing to an IoT Thing Group. --- # Resource: aws_iot_thing_group_membership -Allow to add IoT Thing to IoT Thing Group. +Adds an IoT Thing to an IoT Thing Group. ## Example Usage -```hcl -resource "aws_iot_thing_group_membership" "test_attachment" { - thing_name = "test_thing_name" - thing_group_name = "test_thing_group_name" - override_dynamics_group = false +```terraform +resource "aws_iot_thing_group_membership" "example" { + thing_name = "example-thing" + thing_group_name = "example-group" + + override_dynamics_group = true } ``` ## Argument Reference -* `thing_name` - (Required, Forces New Resource). The name of the thing to add to a group. -* `thing_group_name` - (Required, Forces New Resource). The name of the group to which you are adding a thing. -* `override_dynamics_group` - (Optional) Bool. Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. +* `thing_name` - (Required) The name of the thing to add to a group. +* `thing_group_name` - (Required) The name of the group to which you are adding a thing. +* `override_dynamics_group` - (Optional) Override dynamic thing groups with static thing groups when the 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. ## Import -IOT Thing Group Membership can be imported using the name of thing and thing group. +IoT Thing Group Membership can be imported using the thing name and thing group name.
``` $ terraform import aws_iot_thing_group_membership.example thing_name/thing_group ``` From 2785042be87978064639f453c845e4ae8dc82fdd Mon Sep 17 00:00:00 2001 From: rrangith Date: Thu, 18 Nov 2021 11:37:06 -0500 Subject: [PATCH 275/304] Add encryption-in-transit-supported attribute to aws_ec2_instance_type --- internal/service/ec2/instance_type_data_source.go | 6 ++++++ internal/service/ec2/instance_type_data_source_test.go | 1 + website/docs/d/ec2_instance_type.html.markdown | 1 + 3 files changed, 8 insertions(+) diff --git a/internal/service/ec2/instance_type_data_source.go b/internal/service/ec2/instance_type_data_source.go index d11582993d79..a660865283ba 100644 --- a/internal/service/ec2/instance_type_data_source.go +++ b/internal/service/ec2/instance_type_data_source.go @@ -112,6 +112,11 @@ func DataSourceInstanceType() *schema.Resource { Computed: true, }, + "encryption_in_transit_supported": { + Type: schema.TypeBool, + Computed: true, + }, + "fpgas": { Type: schema.TypeSet, Computed: true, @@ -384,6 +389,7 @@ func dataSourceInstanceTypeRead(d *schema.ResourceData, meta interface{}) error } d.Set("efa_supported", v.NetworkInfo.EfaSupported) d.Set("ena_support", v.NetworkInfo.EnaSupport) + d.Set("encryption_in_transit_supported", v.NetworkInfo.EncryptionInTransitSupported) if v.FpgaInfo != nil { fpgaList := make([]interface{}, len(v.FpgaInfo.Fpgas)) for i, fpg := range v.FpgaInfo.Fpgas { diff --git a/internal/service/ec2/instance_type_data_source_test.go b/internal/service/ec2/instance_type_data_source_test.go index c10a88d24ce1..702aecc49fb9 100644 --- a/internal/service/ec2/instance_type_data_source_test.go +++ b/internal/service/ec2/instance_type_data_source_test.go @@ -31,6 +31,7 @@ func TestAccEC2InstanceTypeDataSource_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceBasic, "ebs_optimized_support", "default"), resource.TestCheckResourceAttr(resourceBasic, "efa_supported", "false"), resource.TestCheckResourceAttr(resourceBasic, "ena_support", "required"), + resource.TestCheckResourceAttr(resourceBasic, "encryption_in_transit_supported", "false"), resource.TestCheckResourceAttr(resourceBasic, "free_tier_eligible", "false"), resource.TestCheckResourceAttr(resourceBasic, "hibernation_supported", "true"), resource.TestCheckResourceAttr(resourceBasic, "hypervisor", "nitro"), diff --git a/website/docs/d/ec2_instance_type.html.markdown b/website/docs/d/ec2_instance_type.html.markdown index c76665a69e3a..e1c6ea3b5976 100644 --- a/website/docs/d/ec2_instance_type.html.markdown +++ b/website/docs/d/ec2_instance_type.html.markdown @@ -51,6 +51,7 @@ In addition to the argument above, the following attributes are exported: * `ebs_performance_maximum_throughput` - The maximum throughput performance for an EBS-optimized instance type, in MBps. * `efa_supported` - Indicates whether Elastic Fabric Adapter (EFA) is supported. * `ena_support` - Indicates whether Elastic Network Adapter (ENA) is supported. +* `encryption_in_transit_supported` - Indicates whether encryption in transit between instances is supported. * `fpgas` - Describes the FPGA accelerator settings for the instance type. * `fpgas.#.count` - The count of FPGA accelerators for the instance type. * `fpgas.#.manufacturer` - The manufacturer of the FPGA accelerator. 
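For context, the new attribute above is populated straight from `NetworkInfo.EncryptionInTransitSupported` in the EC2 `DescribeInstanceTypes` response. A minimal standalone sketch of reading that field directly with aws-sdk-go follows; the session setup and the `c5n.18xlarge` instance type are illustrative assumptions, not part of the patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumes default AWS credentials and region configuration in the environment.
	sess := session.Must(session.NewSession())
	conn := ec2.New(sess)

	out, err := conn.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
		InstanceTypes: aws.StringSlice([]string{"c5n.18xlarge"}), // arbitrary example type
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, it := range out.InstanceTypes {
		// This is the field the new data source attribute is read from.
		fmt.Printf("%s: encryption in transit supported = %t\n",
			aws.StringValue(it.InstanceType),
			aws.BoolValue(it.NetworkInfo.EncryptionInTransitSupported))
	}
}
```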
From 61046811628fe035826c6787af84cc533578d7ca Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 18 Nov 2021 13:32:54 -0500 Subject: [PATCH 276/304] remove moved provider file --- aws/provider.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 aws/provider.go diff --git a/aws/provider.go b/aws/provider.go deleted file mode 100644 index e69de29bb2d1..000000000000 From b0674ad34192e58f8d0ef0231fd3560bcfa3bf54 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 18 Nov 2021 13:34:26 -0500 Subject: [PATCH 277/304] add CHANGELOG for #11941 --- .changelog/11941.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11941.txt diff --git a/.changelog/11941.txt b/.changelog/11941.txt new file mode 100644 index 000000000000..b5428f79a4df --- /dev/null +++ b/.changelog/11941.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_lambda_layer_version_permission +``` \ No newline at end of file From bb4045ca3561f2cc80fcb7cb70808ea627ed730f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 13:39:33 -0500 Subject: [PATCH 278/304] d/aws_cloudfront_response_headers_policy: Add missing 'items' level in 'custom_headers_config' block. --- .../response_headers_policy_data_source.go | 28 +++++++----- ...esponse_headers_policy_data_source_test.go | 44 +++++++++++++++++++ 2 files changed, 62 insertions(+), 10 deletions(-) diff --git a/internal/service/cloudfront/response_headers_policy_data_source.go b/internal/service/cloudfront/response_headers_policy_data_source.go index 6da638eb74b7..6567d924c106 100644 --- a/internal/service/cloudfront/response_headers_policy_data_source.go +++ b/internal/service/cloudfront/response_headers_policy_data_source.go @@ -95,17 +95,25 @@ func DataSourceResponseHeadersPolicy() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "header": { - Type: schema.TypeString, - Computed: true, - }, - "override": { - Type: schema.TypeBool, - Computed: true, - }, - "value": { - Type: schema.TypeString, + "items": { + Type: schema.TypeSet, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeString, + Computed: true, + }, + "override": { + Type: schema.TypeBool, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, }, }, diff --git a/internal/service/cloudfront/response_headers_policy_data_source_test.go b/internal/service/cloudfront/response_headers_policy_data_source_test.go index d30ca0f396c2..43a34346bd4c 100644 --- a/internal/service/cloudfront/response_headers_policy_data_source_test.go +++ b/internal/service/cloudfront/response_headers_policy_data_source_test.go @@ -39,10 +39,16 @@ func TestAccCloudFrontResponseHeadersPolicyDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSource1Name, "cors_config.0.access_control_max_age_sec", resourceName, "cors_config.0.access_control_max_age_sec"), resource.TestCheckResourceAttrPair(dataSource1Name, "cors_config.0.origin_override", resourceName, "cors_config.0.origin_override"), resource.TestCheckResourceAttrPair(dataSource1Name, "custom_headers_config.#", resourceName, "custom_headers_config.#"), + resource.TestCheckResourceAttrPair(dataSource1Name, "custom_headers_config.0.items.#", resourceName, "custom_headers_config.0.items.#"), resource.TestCheckResourceAttrPair(dataSource1Name, "etag", resourceName, "etag"), resource.TestCheckResourceAttrPair(dataSource1Name, "id", resourceName, "id"), 
resource.TestCheckResourceAttrPair(dataSource1Name, "name", resourceName, "name"), resource.TestCheckResourceAttrPair(dataSource1Name, "security_headers_config.#", resourceName, "security_headers_config.#"), + resource.TestCheckResourceAttrPair(dataSource1Name, "security_headers_config.0.content_security_policy.#", resourceName, "security_headers_config.0.content_security_policy.#"), + resource.TestCheckResourceAttrPair(dataSource1Name, "security_headers_config.0.frame_options.#", resourceName, "security_headers_config.0.frame_options.#"), + resource.TestCheckResourceAttrPair(dataSource1Name, "security_headers_config.0.referrer_policy.#", resourceName, "security_headers_config.0.referrer_policy.#"), + resource.TestCheckResourceAttrPair(dataSource1Name, "security_headers_config.0.strict_transport_security.#", resourceName, "security_headers_config.0.strict_transport_security.#"), + resource.TestCheckResourceAttrPair(dataSource1Name, "security_headers_config.0.xss_protection.#", resourceName, "security_headers_config.0.xss_protection.#"), resource.TestCheckResourceAttrPair(dataSource2Name, "comment", resourceName, "comment"), resource.TestCheckResourceAttrPair(dataSource2Name, "cors_config.#", resourceName, "cors_config.#"), @@ -58,10 +64,16 @@ func TestAccCloudFrontResponseHeadersPolicyDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSource2Name, "cors_config.0.access_control_max_age_sec", resourceName, "cors_config.0.access_control_max_age_sec"), resource.TestCheckResourceAttrPair(dataSource2Name, "cors_config.0.origin_override", resourceName, "cors_config.0.origin_override"), resource.TestCheckResourceAttrPair(dataSource2Name, "custom_headers_config.#", resourceName, "custom_headers_config.#"), + resource.TestCheckResourceAttrPair(dataSource2Name, "custom_headers_config.0.items.#", resourceName, "custom_headers_config.0.items.#"), resource.TestCheckResourceAttrPair(dataSource2Name, "etag", resourceName, "etag"), resource.TestCheckResourceAttrPair(dataSource2Name, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSource2Name, "name", resourceName, "name"), resource.TestCheckResourceAttrPair(dataSource2Name, "security_headers_config.#", resourceName, "security_headers_config.#"), + resource.TestCheckResourceAttrPair(dataSource2Name, "security_headers_config.0.content_security_policy.#", resourceName, "security_headers_config.0.content_security_policy.#"), + resource.TestCheckResourceAttrPair(dataSource2Name, "security_headers_config.0.frame_options.#", resourceName, "security_headers_config.0.frame_options.#"), + resource.TestCheckResourceAttrPair(dataSource2Name, "security_headers_config.0.referrer_policy.#", resourceName, "security_headers_config.0.referrer_policy.#"), + resource.TestCheckResourceAttrPair(dataSource2Name, "security_headers_config.0.strict_transport_security.#", resourceName, "security_headers_config.0.strict_transport_security.#"), + resource.TestCheckResourceAttrPair(dataSource2Name, "security_headers_config.0.xss_protection.#", resourceName, "security_headers_config.0.xss_protection.#"), ), }, }, @@ -99,6 +111,38 @@ resource "aws_cloudfront_response_headers_policy" "test" { origin_override = true } + + custom_headers_config { + items { + header = "X-Header2" + override = false + value = "value2" + } + + items { + header = "X-Header1" + override = true + value = "value1" + } + } + + security_headers_config { + content_security_policy { + content_security_policy = "policy1" + override = true + } + + frame_options { + frame_option = 
"DENY" + override = false + } + + strict_transport_security { + access_control_max_age_sec = 90 + override = true + preload = true + } + } } `, rName) } From 0a5fdef07a8d8f4e6adb924cb95bc11e109444ba Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 13:43:26 -0500 Subject: [PATCH 279/304] Add CHANGELOG entry. --- .changelog/21838.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21838.txt diff --git a/.changelog/21838.txt b/.changelog/21838.txt new file mode 100644 index 000000000000..a6d92bd95640 --- /dev/null +++ b/.changelog/21838.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_cloudfront_response_headers_policy: Correctly set `custom_headers_config` attribute +``` \ No newline at end of file From 42bd55ccd36b03f62dac500a724123caf9ba60f8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 13:49:09 -0500 Subject: [PATCH 280/304] Add CHANGELOG entry. --- .changelog/21837.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/21837.txt diff --git a/.changelog/21837.txt b/.changelog/21837.txt new file mode 100644 index 000000000000..1d9d808fb0f7 --- /dev/null +++ b/.changelog/21837.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_ec2_instance_type: Add `encryption_in_transit_supported` attribute +``` \ No newline at end of file From 0a684ce0a903b347eb32e05d746db9fcf9db783c Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 18 Nov 2021 13:56:56 -0500 Subject: [PATCH 281/304] align with service packages structure --- .../lambda/layer_version_permission.go | 24 +-- .../lambda/layer_version_permission_test.go | 158 ++++++------------ internal/service/lambda/sweep.go | 48 ++++++ 3 files changed, 116 insertions(+), 114 deletions(-) diff --git a/internal/service/lambda/layer_version_permission.go b/internal/service/lambda/layer_version_permission.go index 532ca91f4874..f816da36df1a 100644 --- a/internal/service/lambda/layer_version_permission.go +++ b/internal/service/lambda/layer_version_permission.go @@ -11,7 +11,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/service/iam" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) func ResourceLayerVersionPermission() *schema.Resource { @@ -27,7 +31,7 @@ func ResourceLayerVersionPermission() *schema.Resource { Schema: map[string]*schema.Schema{ "layer_arn": { Type: schema.TypeString, - ValidateFunc: validateArn, + ValidateFunc: verify.ValidARN, Required: true, ForceNew: true, }, @@ -69,7 +73,7 @@ func ResourceLayerVersionPermission() *schema.Resource { } func resourceLayerVersionPermissionCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lambdaconn + conn := meta.(*conns.AWSClient).LambdaConn params := &lambda.AddLayerVersionPermissionInput{ LayerName: aws.String(d.Get("layer_arn").(string)), @@ -91,13 +95,13 @@ func resourceLayerVersionPermissionCreate(d *schema.ResourceData, meta interface d.SetId(fmt.Sprintf("%s:%s", *params.LayerName, strconv.FormatInt(*params.VersionNumber, 10))) - return resourceAwsLambdaLayerVersionPermissionRead(d, meta) + return resourceLayerVersionPermissionRead(d, meta) } func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).lambdaconn + conn := meta.(*conns.AWSClient).LambdaConn - layerName, layerArn, version, err := resourceLayerVersionPermissionParseId(d.Id()) + layerName, layerArn, version, err := ResourceLayerVersionPermissionParseId(d.Id()) if err != nil { return fmt.Errorf("Error parsing lambda layer ID: %s", err) } @@ -107,7 +111,7 @@ func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{} VersionNumber: aws.Int64(version), }) - if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { log.Printf("[WARN] Lambda Layer Version (%s) not found, removing its permission from state", d.Id()) d.SetId("") return nil @@ -117,7 +121,7 @@ func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error reading Lambda Layer version permission (%s): %s", d.Id(), err) } - policyDoc := &IAMPolicyDoc{} + policyDoc := &iam.IAMPolicyDoc{} if err := json.Unmarshal([]byte(aws.StringValue(layerVersionPolicyOutput.Policy)), policyDoc); err != nil { return err } @@ -166,9 +170,9 @@ func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{} } func resourceLayerVersionPermissionDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lambdaconn + conn := meta.(*conns.AWSClient).LambdaConn - layerName, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(d.Id()) + layerName, _, version, err := ResourceLayerVersionPermissionParseId(d.Id()) if err != nil { return fmt.Errorf("Error parsing lambda layer ID: %s", err) } @@ -186,7 +190,7 @@ func resourceLayerVersionPermissionDelete(d *schema.ResourceData, meta interface return nil } -func resourceLayerVersionPermissionParseId(id string) (layerName string, layerARN string, version int64, err error) { +func ResourceLayerVersionPermissionParseId(id string) (layerName string, layerARN string, version int64, err error) { arn, err := arn.Parse(id) if err != nil { return diff --git a/internal/service/lambda/layer_version_permission_test.go b/internal/service/lambda/layer_version_permission_test.go index 481a483bda47..6d5ed77d5ebc 100644 --- a/internal/service/lambda/layer_version_permission_test.go +++ b/internal/service/lambda/layer_version_permission_test.go @@ -2,84 +2,36 @@ package lambda_test import ( "fmt" - "log" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tflambda "github.com/hashicorp/terraform-provider-aws/internal/service/lambda" ) -func init() { - resource.AddTestSweepers("aws_lambda_layer_version_permission", &resource.Sweeper{ - Name: "aws_lambda_layer_version_permission", - F: testSweepLambdaLayerVersionPermission, - }) -} - -func testSweepLambdaLayerVersionPermission(region string) error { - client, err := sharedClientForRegion(region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } - - lambdaconn := client.(*AWSClient).lambdaconn - resp, err := lambdaconn.ListLayers(&lambda.ListLayersInput{}) - if err != nil { - if testSweepSkipSweepError(err) { -
log.Printf("[WARN] Skipping Lambda Layer sweep for %s: %s", region, err) - return nil - } - return fmt.Errorf("Error retrieving Lambda layers: %s", err) - } - - if len(resp.Layers) == 0 { - log.Print("[DEBUG] No aws lambda layers to sweep") - return nil - } - - for _, l := range resp.Layers { - versionResp, err := lambdaconn.ListLayerVersions(&lambda.ListLayerVersionsInput{ - LayerName: l.LayerName, - }) - if err != nil { - return fmt.Errorf("Error retrieving versions for lambda layer: %s", err) - } - - for _, v := range versionResp.LayerVersions { - _, err := lambdaconn.DeleteLayerVersion(&lambda.DeleteLayerVersionInput{ - LayerName: l.LayerName, - VersionNumber: v.Version, - }) - if err != nil { - return err - } - } - } - - return nil -} - -func TestAccAWSLambdaLayerVersionPermission_all(t *testing.T) { - resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - rName := acctest.RandomWithPrefix("tf-acc-test") +func TestLambdaLayerVersionPermission_all(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { acctest.PreCheck(t) }, + Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_all(rName), + Config: testLayerVersionPermission_all(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", rName), testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), - resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.lambda_layer", "layer_arn"), + resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.test", "layer_arn"), ), }, @@ -92,25 +44,24 @@ func TestAccAWSLambdaLayerVersionPermission_all(t *testing.T) { }) } -func TestAccAWSLambdaLayerVersionPermission_org(t *testing.T) { - resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - rName := acctest.RandomWithPrefix("tf-acc-test") +func TestLambdaLayerVersionPermission_org(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { acctest.PreCheck(t) }, + Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_org(rName), + Config: testLayerVersionPermission_org(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", rName), testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), resource.TestCheckResourceAttr(resourceName, "organization_id", "o-0123456789"), - 
resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.lambda_layer", "layer_arn"), + resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.test", "layer_arn"), ), }, @@ -123,24 +74,23 @@ func TestAccAWSLambdaLayerVersionPermission_org(t *testing.T) { }) } -func TestAccAWSLambdaLayerVersionPermission_account(t *testing.T) { - resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - rName := acctest.RandomWithPrefix("tf-acc-test") +func TestLambdaLayerVersionPermission_account(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { acctest.PreCheck(t) }, + Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_account(rName), + Config: testLayerVersionPermission_account(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionExists("aws_lambda_layer_version.lambda_layer", rName), testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "456789820214"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), - resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.lambda_layer", "layer_arn"), + resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.test", "layer_arn"), ), }, @@ -153,20 +103,20 @@ func TestAccAWSLambdaLayerVersionPermission_account(t *testing.T) { }) } -func TestAccAWSLambdaLayerVersionPermission_disappears(t *testing.T) { - resourceName := "aws_lambda_layer_version_permission.lambda_layer_permission" - rName := acctest.RandomWithPrefix("tf-acc-test") +func TestLambdaLayerVersionPermission_disappears(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { acctest.PreCheck(t) }, + Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLambdaLayerVersionPermission_account(rName), + Config: testLayerVersionPermission_account(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), - testAccCheckResourceDisappears(testAccProvider, resourceAwsLambdaLayerVersionPermission(), resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tflambda.ResourceLayerVersionPermission(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -176,16 +126,16 @@ func TestAccAWSLambdaLayerVersionPermission_disappears(t *testing.T) { // Creating Lambda layer and Lambda layer permissions -func testAccAWSLambdaLayerVersionPermission_all(layerName string) string { +func testLayerVersionPermission_all(layerName string) string { return fmt.Sprintf(` -resource "aws_lambda_layer_version" "lambda_layer" { +resource "aws_lambda_layer_version" "test" { filename = "test-fixtures/lambdatest.zip" layer_name = "%s" } -resource "aws_lambda_layer_version_permission" 
"lambda_layer_permission" { - layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn - layer_version = aws_lambda_layer_version.lambda_layer.version +resource "aws_lambda_layer_version_permission" "test" { + layer_arn = aws_lambda_layer_version.test.layer_arn + layer_version = aws_lambda_layer_version.test.version action = "lambda:GetLayerVersion" statement_id = "xaccount" principal = "*" @@ -193,16 +143,16 @@ resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { `, layerName) } -func testAccAWSLambdaLayerVersionPermission_org(layerName string) string { +func testLayerVersionPermission_org(layerName string) string { return fmt.Sprintf(` -resource "aws_lambda_layer_version" "lambda_layer" { +resource "aws_lambda_layer_version" "test" { filename = "test-fixtures/lambdatest.zip" layer_name = "%s" } -resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { - layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn - layer_version = aws_lambda_layer_version.lambda_layer.version +resource "aws_lambda_layer_version_permission" "test" { + layer_arn = aws_lambda_layer_version.test.layer_arn + layer_version = aws_lambda_layer_version.test.version action = "lambda:GetLayerVersion" statement_id = "xaccount" principal = "*" @@ -211,16 +161,16 @@ resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { `, layerName) } -func testAccAWSLambdaLayerVersionPermission_account(layerName string) string { +func testLayerVersionPermission_account(layerName string) string { return fmt.Sprintf(` -resource "aws_lambda_layer_version" "lambda_layer" { +resource "aws_lambda_layer_version" "test" { filename = "test-fixtures/lambdatest.zip" layer_name = "%s" } -resource "aws_lambda_layer_version_permission" "lambda_layer_permission" { - layer_arn = aws_lambda_layer_version.lambda_layer.layer_arn - layer_version = aws_lambda_layer_version.lambda_layer.version +resource "aws_lambda_layer_version_permission" "test" { + layer_arn = aws_lambda_layer_version.test.layer_arn + layer_version = aws_lambda_layer_version.test.version action = "lambda:GetLayerVersion" statement_id = "xaccount" principal = "456789820214" @@ -243,19 +193,19 @@ func testAccCheckAwsLambdaLayerVersionPermissionExists(res, layerName string) re return fmt.Errorf("Lambda Layer Version Permission not set") } - _, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(rs.Primary.Attributes["id"]) + _, _, version, err := tflambda.ResourceLayerVersionPermissionParseId(rs.Primary.Attributes["id"]) if err != nil { return fmt.Errorf("Error parsing lambda layer ID: %s", err) } - conn := testAccProvider.Meta().(*AWSClient).lambdaconn + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn _, err = conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{ LayerName: aws.String(layerName), VersionNumber: aws.Int64(version), }) - if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { return err } @@ -264,14 +214,14 @@ func testAccCheckAwsLambdaLayerVersionPermissionExists(res, layerName string) re } func testAccCheckLambdaLayerVersionPermissionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).lambdaconn + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lambda_layer_version_permission" { continue } - layerName, _, version, err := resourceAwsLambdaLayerVersionPermissionParseId(rs.Primary.ID) 
+ layerName, _, version, err := tflambda.ResourceLayerVersionPermissionParseId(rs.Primary.ID) if err != nil { return err } @@ -281,7 +231,7 @@ func testAccCheckLambdaLayerVersionPermissionDestroy(s *terraform.State) error { VersionNumber: aws.Int64(version), }) - if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { continue } if err != nil { diff --git a/internal/service/lambda/sweep.go b/internal/service/lambda/sweep.go index a7681a5b928f..af8db1a63f80 100644 --- a/internal/service/lambda/sweep.go +++ b/internal/service/lambda/sweep.go @@ -23,6 +23,11 @@ func init() { Name: "aws_lambda_layer", F: sweepLayerVersions, }) + + resource.AddTestSweepers("aws_lambda_layer_version_permission", &resource.Sweeper{ + Name: "aws_lambda_layer_version_permission", + F: sweepLayerVersionPermissions, + }) } func sweepFunctions(region string) error { @@ -102,3 +107,46 @@ func sweepLayerVersions(region string) error { return nil } + +func sweepLayerVersionPermissions(region string) error { + client, err := sweep.SharedRegionalSweepClient(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + + lambdaconn := client.(*AWSClient).lambdaconn + resp, err := lambdaconn.ListLayers(&lambda.ListLayersInput{}) + if err != nil { + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Lambda Layer sweep for %s: %s", region, err) + return nil + } + return fmt.Errorf("Error retrieving Lambda layers: %s", err) + } + + if len(resp.Layers) == 0 { + log.Print("[DEBUG] No aws lambda layers to sweep") + return nil + } + + for _, l := range resp.Layers { + versionResp, err := lambdaconn.ListLayerVersions(&lambda.ListLayerVersionsInput{ + LayerName: l.LayerName, + }) + if err != nil { + return fmt.Errorf("Error retrieving versions for lambda layer: %s", err) + } + + for _, v := range versionResp.LayerVersions { + _, err := lambdaconn.DeleteLayerVersion(&lambda.DeleteLayerVersionInput{ + LayerName: l.LayerName, + VersionNumber: v.Version, + }) + if err != nil { + return err + } + } + } + + return nil +} From 9712eb5c55da4b33f17b97bf234650bafc417d9d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 14:59:43 -0500 Subject: [PATCH 282/304] r/aws_iot_thing_group_membership: First acceptance tests passing. 
--- .changelog/21799.txt | 4 + internal/service/iot/find.go | 62 +++++ .../service/iot/thing_group_membership.go | 132 +++++------ .../iot/thing_group_membership_test.go | 224 +++++++++++++----- .../iot_thing_group_membership.html.markdown | 8 +- 5 files changed, 288 insertions(+), 142 deletions(-) diff --git a/.changelog/21799.txt b/.changelog/21799.txt index 779e4e16e5ef..a82dbceb7016 100644 --- a/.changelog/21799.txt +++ b/.changelog/21799.txt @@ -1,3 +1,7 @@ ```release-note:new-resource aws_iot_thing_group +``` + +```release-note:new-resource +aws_iot_thing_group_membership ``` \ No newline at end of file diff --git a/internal/service/iot/find.go b/internal/service/iot/find.go index 2e5d0cf7f681..454286eb88a6 100644 --- a/internal/service/iot/find.go +++ b/internal/service/iot/find.go @@ -33,6 +33,31 @@ func FindAuthorizerByName(conn *iot.IoT, name string) (*iot.AuthorizerDescriptio return output.AuthorizerDescription, nil } +func FindThingByName(conn *iot.IoT, name string) (*iot.DescribeThingOutput, error) { + input := &iot.DescribeThingInput{ + ThingName: aws.String(name), + } + + output, err := conn.DescribeThing(input) + + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func FindThingGroupByName(conn *iot.IoT, name string) (*iot.DescribeThingGroupOutput, error) { input := &iot.DescribeThingGroupInput{ ThingGroupName: aws.String(name), @@ -57,3 +82,40 @@ func FindThingGroupByName(conn *iot.IoT, name string) (*iot.DescribeThingGroupOu return output, nil } + +func FindThingGroupMembership(conn *iot.IoT, thingGroupName, thingName string) error { + input := &iot.ListThingGroupsForThingInput{ + ThingName: aws.String(thingName), + } + + var v *iot.GroupNameAndArn + + err := conn.ListThingGroupsForThingPages(input, func(page *iot.ListThingGroupsForThingOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, group := range page.ThingGroups { + if aws.StringValue(group.GroupName) == thingGroupName { + v = group + + return false + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + return &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if v == nil { + return tfresource.NewEmptyResultError(input) + } + + return nil +} diff --git a/internal/service/iot/thing_group_membership.go b/internal/service/iot/thing_group_membership.go index a83626d01a34..3939c31ef875 100644 --- a/internal/service/iot/thing_group_membership.go +++ b/internal/service/iot/thing_group_membership.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" "github.com/hashicorp/aws-sdk-go-base/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func ResourceThingGroupMembership() *schema.Resource { @@ -18,14 +18,15 @@ func ResourceThingGroupMembership() *schema.Resource { Create: resourceThingGroupMembershipCreate, Read: resourceThingGroupMembershipRead, Delete: resourceThingGroupMembershipDelete, + Importer: &schema.ResourceImporter{ - State: resourceThingGroupMembershipImport, + State: 
schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ - "thing_name": { - Type: schema.TypeString, - Required: true, + "override_dynamic_group": { + Type: schema.TypeBool, + Optional: true, ForceNew: true, }, "thing_group_name": { @@ -33,9 +34,9 @@ func ResourceThingGroupMembership() *schema.Resource { Required: true, ForceNew: true, }, - "override_dynamics_group": { - Type: schema.TypeBool, - Optional: true, + "thing_name": { + Type: schema.TypeString, + Required: true, ForceNew: true, }, }, @@ -45,21 +46,25 @@ func ResourceThingGroupMembership() *schema.Resource { func resourceThingGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - params := &iot.AddThingToThingGroupInput{} - params.ThingName = aws.String(d.Get("thing_name").(string)) - params.ThingGroupName = aws.String(d.Get("thing_group_name").(string)) + thingGroupName := d.Get("thing_group_name").(string) + thingName := d.Get("thing_name").(string) + input := &iot.AddThingToThingGroupInput{ + ThingGroupName: aws.String(thingGroupName), + ThingName: aws.String(thingName), + } - if v, ok := d.GetOk("override_dynamics_group"); ok { - params.OverrideDynamicGroups = aws.Bool(v.(bool)) + if v, ok := d.GetOk("override_dynamic_group"); ok { + input.OverrideDynamicGroups = aws.Bool(v.(bool)) } - _, err := conn.AddThingToThingGroup(params) + log.Printf("[DEBUG] Creating IoT Thing Group Membership: %s", input) + _, err := conn.AddThingToThingGroup(input) if err != nil { - return err + return fmt.Errorf("error adding IoT Thing (%s) to IoT Thing Group (%s): %w", thingName, thingGroupName, err) } - d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-%s", *params.ThingName, *params.ThingGroupName))) + d.SetId(ThingGroupMembershipCreateResourceID(thingGroupName, thingName)) return resourceThingGroupMembershipRead(d, meta) } @@ -67,32 +72,26 @@ func resourceThingGroupMembershipCreate(d *schema.ResourceData, meta interface{} func resourceThingGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - thingName := d.Get("thing_name").(string) - thingGroupName := d.Get("thing_group_name").(string) + thingGroupName, thingName, err := ThingGroupMembershipParseResourceID(d.Id()) + + if err != nil { + return err + } - hasThingGroup, err := IotThingHasThingGroup(conn, thingName, thingGroupName, "") + err = FindThingGroupMembership(conn, thingGroupName, thingName) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] IoT Thing (%s) is not found", thingName) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] IoT Thing Group Membership (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return fmt.Errorf("error finding IoT Thing Group (%s) of thing (%s): %s", thingGroupName, thingName, err) + return fmt.Errorf("error reading IoT Thing Group Membership (%s): %w", d.Id(), err) } - if !hasThingGroup { - log.Printf("[WARN] IoT Thing Group (%s) is not found in Thing (%s) group list", thingGroupName, thingName) - d.SetId("") - return nil - } - - d.Set("thing_name", thingName) d.Set("thing_group_name", thingGroupName) - if v, ok := d.GetOk("override_dynamics_group"); ok { - d.Set("override_dynamics_group", v.(bool)) - } + d.Set("thing_name", thingName) return nil } @@ -100,67 +99,44 @@ func resourceThingGroupMembershipRead(d *schema.ResourceData, meta interface{}) func resourceThingGroupMembershipDelete(d *schema.ResourceData, meta 
interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - params := &iot.RemoveThingFromThingGroupInput{} - params.ThingName = aws.String(d.Get("thing_name").(string)) - params.ThingGroupName = aws.String(d.Get("thing_group_name").(string)) - - _, err := conn.RemoveThingFromThingGroup(params) + thingGroupName, thingName, err := ThingGroupMembershipParseResourceID(d.Id()) if err != nil { return err } - return nil -} + log.Printf("[DEBUG] Deleting IoT Thing Group Membership: %s", d.Id()) + _, err = conn.RemoveThingFromThingGroup(&iot.RemoveThingFromThingGroupInput{ + ThingGroupName: aws.String(thingGroupName), + ThingName: aws.String(thingName), + }) -func resourceThingGroupMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - idParts := strings.SplitN(d.Id(), "/", 2) - if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { - return nil, fmt.Errorf("unexpected format of ID (%q), expected /", d.Id()) + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + return nil } - thingName := idParts[0] - thingGroupName := idParts[1] - - d.Set("thing_name", thingName) - d.Set("thing_group_name", thingGroupName) - - d.SetId(fmt.Sprintf("%s-%s", thingName, thingGroupName)) + if err != nil { + return fmt.Errorf("error removing IoT Thing (%s) from IoT Thing Group (%s): %w", thingName, thingGroupName, err) + } - return []*schema.ResourceData{d}, nil + return nil } -func IotThingHasThingGroup(conn *iot.IoT, thingName string, thingGroupName string, nextToken string) (bool, error) { - maxResults := int64(20) +const thingGroupMembershipResourceIDSeparator = "/" - params := &iot.ListThingGroupsForThingInput{ - MaxResults: aws.Int64(maxResults), - ThingName: aws.String(thingName), - } +func ThingGroupMembershipCreateResourceID(thingGroupName, thingName string) string { + parts := []string{thingGroupName, thingName} + id := strings.Join(parts, thingGroupMembershipResourceIDSeparator) - if len(nextToken) > 0 { - params.NextToken = aws.String(nextToken) - } + return id +} - out, err := conn.ListThingGroupsForThing(params) - if err != nil { - return false, err - } +func ThingGroupMembershipParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, thingGroupMembershipResourceIDSeparator) - // Check if searched group is in current collection - // If it is return true - for _, group := range out.ThingGroups { - if thingGroupName == *group.GroupName { - return true, nil - } + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil } - // If group that we searched for not appear in current list - // then check if NextToken exists. If it is so call hasThingGroup - // recursively to search in next part of list. 
Otherwise return false - if out.NextToken != nil { - return IotThingHasThingGroup(conn, thingName, thingGroupName, *out.NextToken) - } else { - return false, nil - } + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected thing-group-name%[2]sthing-name", id, thingGroupMembershipResourceIDSeparator) } diff --git a/internal/service/iot/thing_group_membership_test.go b/internal/service/iot/thing_group_membership_test.go index eefb5d645947..273316ce43c1 100644 --- a/internal/service/iot/thing_group_membership_test.go +++ b/internal/service/iot/thing_group_membership_test.go @@ -5,17 +5,19 @@ import ( "testing" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfiot "github.com/hashicorp/terraform-provider-aws/internal/service/iot" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccIoTThingGroupMembership_basic(t *testing.T) { - rString := sdkacctest.RandString(8) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_thing_group_membership.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -24,50 +26,149 @@ func TestAccIoTThingGroupMembership_basic(t *testing.T) { CheckDestroy: testAccCheckThingGroupMembershipDestroy, Steps: []resource.TestStep{ { - Config: testAccThingGroupMembershipConfig_basic(rString), + Config: testAccThingGroupMembershipConfig(rName1, rName2), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_iot_thing_group_membership.test_attachment", "thing_name", fmt.Sprintf("test_thing_%s", rString)), - resource.TestCheckResourceAttr("aws_iot_thing_group_membership.test_attachment", "thing_group_name", fmt.Sprintf("test_group_%s", rString)), - resource.TestCheckResourceAttr("aws_iot_thing_group_membership.test_attachment", "override_dynamics_group", "false"), - testAccCheckThingGroupMembershipExists(rString), + testAccCheckThingGroupMembershipExists(resourceName), + resource.TestCheckNoResourceAttr(resourceName, "override_dynamic_group"), + resource.TestCheckResourceAttr(resourceName, "thing_group_name", rName1), + resource.TestCheckResourceAttr(resourceName, "thing_name", rName2), ), }, { - ResourceName: "aws_iot_thing_group_membership.test_attachment", - ImportStateIdFunc: testAccCheckThingGroupMembershipImportStateIdFunc("aws_iot_thing_group_membership.test_attachment"), + ResourceName: resourceName, ImportState: true, - // We do not have a way to align IDs since the Create function uses resource.PrefixedUniqueId() - // Failed state verification, resource with ID ROLE-POLICYARN not found - // ImportStateVerify: true, + ImportStateVerify: true, }, }, }) } -func testAccCheckThingGroupMembershipExists(rString string) resource.TestCheckFunc { +func TestAccIoTThingGroupMembership_disappears(t *testing.T) { + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_thing_group_membership.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, 
iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupMembershipDestroy, + Steps: []resource.TestStep{ + { + Config: testAccThingGroupMembershipConfig(rName1, rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingGroupMembershipExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfiot.ResourceThingGroupMembership(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccIoTThingGroupMembership_disappears_Thing(t *testing.T) { + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_thing_group_membership.test" + thingResourceName := "aws_iot_thing.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupMembershipDestroy, + Steps: []resource.TestStep{ + { + Config: testAccThingGroupMembershipConfig(rName1, rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingGroupMembershipExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfiot.ResourceThing(), thingResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccIoTThingGroupMembership_disappears_ThingGroup(t *testing.T) { + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_thing_group_membership.test" + thingGroupResourceName := "aws_iot_thing_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupMembershipDestroy, + Steps: []resource.TestStep{ + { + Config: testAccThingGroupMembershipConfig(rName1, rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingGroupMembershipExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfiot.ResourceThingGroup(), thingGroupResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccIoTThingGroupMembership_overrideDynamicGroup(t *testing.T) { + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_thing_group_membership.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, iot.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckThingGroupMembershipDestroy, + Steps: []resource.TestStep{ + { + Config: testAccThingGroupMembershipConfigOverrideDynamicGroup(rName1, rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckThingGroupMembershipExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "override_dynamic_group", "true"), + resource.TestCheckResourceAttr(resourceName, "thing_group_name", rName1), + resource.TestCheckResourceAttr(resourceName, "thing_name", rName2), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + // override_dynamic_group + }, + }, + }) +} + +func testAccCheckThingGroupMembershipExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).IoTConn + if rs.Primary.ID == "" { + return fmt.Errorf("No IoT Thing Group Membership ID is set") + } - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iot_thing_group_membership" { - continue - } + thingGroupName, thingName, err := tfiot.ThingGroupMembershipParseResourceID(rs.Primary.ID) - thingName := rs.Primary.Attributes["thing_name"] - thingGroupName := rs.Primary.Attributes["thing_group_name"] - hasThingGroup, err := tfiot.IotThingHasThingGroup(conn, thingName, thingGroupName, "") + if err != nil { + return err + } - if err != nil { - return err - } + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn - if !hasThingGroup { - return fmt.Errorf("IoT Thing (%s) is not in IoT Thing Group (%s)", thingName, thingGroupName) - } + err = tfiot.FindThingGroupMembership(conn, thingGroupName, thingName) - return nil + if err != nil { + return err } + return nil } } @@ -80,57 +181,60 @@ func testAccCheckThingGroupMembershipDestroy(s *terraform.State) error { continue } - thingName := rs.Primary.Attributes["thing_name"] - thingGroupName := rs.Primary.Attributes["thing_group_name"] + thingGroupName, thingName, err := tfiot.ThingGroupMembershipParseResourceID(rs.Primary.ID) + + if err != nil { + return err + } - hasThingGroup, err := tfiot.IotThingHasThingGroup(conn, thingName, thingGroupName, "") + err = tfiot.FindThingGroupMembership(conn, thingGroupName, thingName) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return nil + if tfresource.NotFound(err) { + continue } if err != nil { return err } - if hasThingGroup { - return fmt.Errorf("IoT Thing (%s) still in IoT Thing Group (%s)", thingName, thingGroupName) - } + return fmt.Errorf("IoT Thing Group Membership %s still exists", rs.Primary.ID) } + return nil } -func testAccCheckThingGroupMembershipImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("Not found: %s", resourceName) - } +func testAccThingGroupMembershipConfig(rName1, rName2 string) string { + return fmt.Sprintf(` +resource "aws_iot_thing_group" "test" { + name = %[1]q +} - return fmt.Sprintf("%s/%s", rs.Primary.Attributes["thing_name"], rs.Primary.Attributes["thing_group_name"]), nil - } +resource "aws_iot_thing" "test" { + name = %[2]q } -func testAccThingGroupMembershipConfig_basic(rString string) string { +resource "aws_iot_thing_group_membership" "test" { + thing_group_name = aws_iot_thing_group.test.name + thing_name = aws_iot_thing.test.name +} +`, rName1, rName2) +} + +func testAccThingGroupMembershipConfigOverrideDynamicGroup(rName1, rName2 string) string { return fmt.Sprintf(` -resource "aws_iot_thing" "test_thing" { - name = "test_thing_%s" +resource "aws_iot_thing_group" "test" { + name = %[1]q } -resource "aws_iot_thing_group" "test_thing_group" { - name = "test_group_%[1]s" - properties { - attributes = { - "attr1": "val1", - } - merge = false - } +resource "aws_iot_thing" "test" { + name = %[2]q } -resource "aws_iot_thing_group_membership" "test_attachment" { - thing_name = "${aws_iot_thing.test_thing.name}" - thing_group_name = "${aws_iot_thing_group.test_thing_group.name}" - override_dynamics_group = false +resource "aws_iot_thing_group_membership" "test" { + thing_group_name = aws_iot_thing_group.test.name + thing_name = aws_iot_thing.test.name + + override_dynamic_group = true } -`, rString) +`, rName1, rName2) } diff 
--git a/website/docs/r/iot_thing_group_membership.html.markdown b/website/docs/r/iot_thing_group_membership.html.markdown index e743fba85236..a188ee250c6e 100644 --- a/website/docs/r/iot_thing_group_membership.html.markdown +++ b/website/docs/r/iot_thing_group_membership.html.markdown @@ -17,7 +17,7 @@ resource "aws_iot_thing_group_membership" "example" { thing_name = "example-thing" thing_group_name = "example-group" - override_dynamics_group = true + override_dynamic_group = true } ``` @@ -25,12 +25,12 @@ resource "aws_iot_thing_group_membership" "example" { * `thing_name` - (Required) The name of the thing to add to a group. * `thing_group_name` - (Required) The name of the group to which you are adding a thing. -* `override_dynamics_group` - (Optional) Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. +* `override_dynamic_group` - (Optional) Override dynamic thing groups with static thing groups when the 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. ## Import -IoT Thing Group Membership can be imported using the name of thing and thing group. +IoT Thing Group Membership can be imported using the thing group name and thing name. ``` -$ terraform import aws_iot_thing_group_membership.example thing_name/thing_group +$ terraform import aws_iot_thing_group_membership.example thing_group_name/thing_name ``` From 54f6048b29522d106019fb7da3de55bc1584750b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:15:13 -0500 Subject: [PATCH 283/304] Add changelog --- .changelog/11997.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11997.txt diff --git a/.changelog/11997.txt b/.changelog/11997.txt new file mode 100644 index 000000000000..736d2cdfea5a --- /dev/null +++ b/.changelog/11997.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lambda_layer_version: Add `skip_destroy` attribute +``` \ No newline at end of file From f291c7eebf10c1d544d2d61a3b32734801444e8a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:15:47 -0500 Subject: [PATCH 284/304] lambda_layer_version: Add argument to docs --- .../docs/r/lambda_layer_version.html.markdown | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/website/docs/r/lambda_layer_version.html.markdown b/website/docs/r/lambda_layer_version.html.markdown index 9143f9861ead..16e95bfed515 100644 --- a/website/docs/r/lambda_layer_version.html.markdown +++ b/website/docs/r/lambda_layer_version.html.markdown @@ -32,22 +32,26 @@ Once you have created your deployment package you can specify it either directly indirectly via Amazon S3 (using the `s3_bucket`, `s3_key` and `s3_object_version` arguments). When providing the deployment package via S3 it may be useful to use [the `aws_s3_bucket_object` resource](s3_bucket_object.html) to upload it. -For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading -large files efficiently. +For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading large files efficiently.
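To make the indirect S3 path concrete, a minimal sketch follows; the bucket and object names are illustrative assumptions, not values taken from this patch series:

```hcl
# Hypothetical names throughout; assumes the bucket already exists in the
# same region where the Lambda layer will be published.
resource "aws_s3_bucket_object" "layer_zip" {
  bucket = "my-artifact-bucket"
  key    = "lambda_layer_payload.zip"
  source = "lambda_layer_payload.zip" # local build artifact
}

resource "aws_lambda_layer_version" "example" {
  layer_name = "lambda_layer_name"
  s3_bucket  = aws_s3_bucket_object.layer_zip.bucket
  s3_key     = aws_s3_bucket_object.layer_zip.key
}
```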
## Argument Reference -* `layer_name` (Required) A unique name for your Lambda Layer -* `filename` (Optional) The path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options cannot be used. -* `s3_bucket` - (Optional) The S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. -* `s3_key` - (Optional) The S3 key of an object containing the function's deployment package. Conflicts with `filename`. -* `s3_object_version` - (Optional) The object version containing the function's deployment package. Conflicts with `filename`. -* `compatible_runtimes` - (Optional) A list of [Runtimes][2] this layer is compatible with. Up to 5 runtimes can be specified. -* `compatible_architectures` - (Optional) A list of [Architectures][4] this layer is compatible with. Currently `x86_64` and `arm64` can be specified. +The following arguments are required: + +* `layer_name` - (Required) A unique name for your Lambda Layer. + +The following arguments are optional: + +* `compatible_architectures` - (Optional) List of [Architectures][4] this layer is compatible with. Currently `x86_64` and `arm64` can be specified. +* `compatible_runtimes` - (Optional) List of [Runtimes][2] this layer is compatible with. Up to 5 runtimes can be specified. * `description` - (Optional) Description of what your Lambda Layer does. +* `filename` - (Optional) Path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used. * `license_info` - (Optional) License info for your Lambda Layer. See [License Info][3]. +* `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. +* `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename`. +* `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename`. +* `skip_destroy` - (Optional) Whether to retain old versions of previously deployed Lambda Layers. Default is `false`. * `source_code_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `${filebase64sha256("file.zip")}` (Terraform 0.11.12 or later) or `${base64sha256(file("file.zip"))}` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive. -* `retain` - (Optional) Retains old versions of previously deployed Lambda Layers if true. Defaults to false.
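As a minimal sketch of the new `skip_destroy` argument described above (the layer name and filename are placeholders):

```hcl
resource "aws_lambda_layer_version" "retained" {
  filename   = "lambda_layer_payload.zip" # placeholder artifact
  layer_name = "retained_layer"           # placeholder name

  # Superseded versions are left in the account instead of being deleted
  # when a ForceNew change publishes a replacement version.
  skip_destroy = true
}
```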
## Attributes Reference From 85d17b2d6062316647dd08f9e7a6803c21372838 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:16:38 -0500 Subject: [PATCH 285/304] lambda_layer_version: Add 'skip_destroy' argument --- internal/service/lambda/layer_version.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/internal/service/lambda/layer_version.go b/internal/service/lambda/layer_version.go index 58e94c5d6f25..2ab2cefc343a 100644 --- a/internal/service/lambda/layer_version.go +++ b/internal/service/lambda/layer_version.go @@ -118,6 +118,11 @@ func ResourceLayerVersion() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "skip_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "source_code_size": { Type: schema.TypeInt, Computed: true, @@ -126,11 +131,6 @@ func ResourceLayerVersion() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "retain": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, }, } } @@ -265,12 +265,11 @@ func resourceLayerVersionRead(d *schema.ResourceData, meta interface{}) error { } func resourceLayerVersionDelete(d *schema.ResourceData, meta interface{}) error { - retain := d.Get("retain").(bool) - if retain { - log.Printf("[DEBUG] Retaining Lambda Layer %q", d.Get("arn").(string)) + if v, ok := d.GetOk("skip_destroy"); ok && v.(bool) { + log.Printf("[DEBUG] Retaining Lambda Layer Version %q", d.Id()) return nil } - + conn := meta.(*conns.AWSClient).LambdaConn version, err := strconv.ParseInt(d.Get("version").(string), 10, 64) From 29262f2e2fb0820104c688736cf60f25a36c17f2 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:24:14 -0500 Subject: [PATCH 286/304] docs/lambda_layer_version: Clarify docs --- website/docs/r/lambda_layer_version.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/lambda_layer_version.html.markdown b/website/docs/r/lambda_layer_version.html.markdown index 16e95bfed515..2ca7b625ccdb 100644 --- a/website/docs/r/lambda_layer_version.html.markdown +++ b/website/docs/r/lambda_layer_version.html.markdown @@ -50,7 +50,7 @@ The following arguments are optional: * `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. * `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename`. * `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename`. -* `skip_destroy` - (Optional) Whether to retain old versions of previously deployed Lambda Layers. Default is `false`. +* `skip_destroy` - (Optional) Whether to retain the old version of a previously deployed Lambda Layer. Default is `false`. When this is not set to `true`, changing any of `compatible_architectures`, `compatible_runtimes`, `description`, `filename`, `layer_name`, `license_info`, `s3_bucket`, `s3_key`, `s3_object_version`, or `source_code_hash` forces deletion of the existing layer version and creation of a new layer version. * `source_code_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. 
The usual way to set this is `${filebase64sha256("file.zip")}` (Terraform 0.11.12 or later) or `${base64sha256(file("file.zip"))}` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive. ## Attributes Reference From b09ded6de924e28ec2c493d1a5471264ff252bac Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:28:59 -0500 Subject: [PATCH 287/304] docs/lambda_layer_version: Clean up attribs --- website/docs/r/lambda_layer_version.html.markdown | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/docs/r/lambda_layer_version.html.markdown b/website/docs/r/lambda_layer_version.html.markdown index 2ca7b625ccdb..cbf4342e68d2 100644 --- a/website/docs/r/lambda_layer_version.html.markdown +++ b/website/docs/r/lambda_layer_version.html.markdown @@ -57,13 +57,13 @@ The following arguments are optional: In addition to all arguments above, the following attributes are exported: -* `arn` - The Amazon Resource Name (ARN) of the Lambda Layer with version. -* `layer_arn` - The Amazon Resource Name (ARN) of the Lambda Layer without version. -* `created_date` - The date this resource was created. -* `signing_job_arn` - The Amazon Resource Name (ARN) of a signing job. -* `signing_profile_version_arn` - The Amazon Resource Name (ARN) for a signing profile version. -* `source_code_size` - The size in bytes of the function .zip file. -* `version` - This Lambda Layer version. +* `arn` - ARN of the Lambda Layer with version. +* `created_date` - Date this resource was created. +* `layer_arn` - ARN of the Lambda Layer without version. +* `signing_job_arn` - ARN of a signing job. +* `signing_profile_version_arn` - ARN for a signing profile version. +* `source_code_size` - Size in bytes of the function .zip file. +* `version` - Lambda Layer version. 
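For context, the exported `arn` attribute is what a function's `layers` argument consumes; a brief hedged sketch follows (the function name, handler, runtime, and IAM role reference are assumptions, not part of this patch series):

```hcl
resource "aws_lambda_function" "example" {
  function_name = "example"                # placeholder
  role          = aws_iam_role.example.arn # assumes a role defined elsewhere
  handler       = "index.handler"
  runtime       = "nodejs12.x"
  filename      = "function_payload.zip"   # placeholder artifact

  # Attach the versioned layer by its ARN.
  layers = [aws_lambda_layer_version.example.arn]
}
```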
[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html [2]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes From fe356c9f5e290681c4650165a98c85ec920ee9cf Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:29:21 -0500 Subject: [PATCH 288/304] lambda_layer_version: Alphabetize args, attribs --- internal/service/lambda/layer_version.go | 85 ++++++++++++------------ 1 file changed, 42 insertions(+), 43 deletions(-) diff --git a/internal/service/lambda/layer_version.go b/internal/service/lambda/layer_version.go index 2ab2cefc343a..41578682535f 100644 --- a/internal/service/lambda/layer_version.go +++ b/internal/service/lambda/layer_version.go @@ -30,6 +30,10 @@ func ResourceLayerVersion() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "compatible_architectures": { Type: schema.TypeSet, Optional: true, @@ -40,9 +44,24 @@ func ResourceLayerVersion() *schema.Resource { ValidateFunc: validation.StringInSlice(lambda.Architecture_Values(), false), }, }, - "layer_name": { + "compatible_runtimes": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 0, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(lambda.Runtime_Values(), false), + }, + }, + "created_date": { Type: schema.TypeString, - Required: true, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, ForceNew: true, }, "filename": { @@ -51,6 +70,21 @@ func ResourceLayerVersion() *schema.Resource { ForceNew: true, ConflictsWith: []string{"s3_bucket", "s3_key", "s3_object_version"}, }, + "layer_arn": { + Type: schema.TypeString, + Computed: true, + }, + "layer_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "license_info": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, "s3_bucket": { Type: schema.TypeString, Optional: true, @@ -69,40 +103,18 @@ func ResourceLayerVersion() *schema.Resource { ForceNew: true, ConflictsWith: []string{"filename"}, }, - "compatible_runtimes": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - MinItems: 0, - MaxItems: 5, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(lambda.Runtime_Values(), false), - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "license_info": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - - "arn": { + "signing_job_arn": { Type: schema.TypeString, Computed: true, }, - "layer_arn": { + "signing_profile_version_arn": { Type: schema.TypeString, Computed: true, }, - "created_date": { - Type: schema.TypeString, - Computed: true, + "skip_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, "source_code_hash": { Type: schema.TypeString, @@ -110,19 +122,6 @@ func ResourceLayerVersion() *schema.Resource { ForceNew: true, Computed: true, }, - "signing_profile_version_arn": { - Type: schema.TypeString, - Computed: true, - }, - "signing_job_arn": { - Type: schema.TypeString, - Computed: true, - }, - "skip_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, "source_code_size": { Type: schema.TypeInt, Computed: true, From 9845d6050b51825535b594607f3f1f65f0145be6 Mon Sep 17 00:00:00 2001 
From: Dirk Avery Date: Thu, 18 Nov 2021 15:45:55 -0500 Subject: [PATCH 289/304] layer_version: Switch to forcenew, no update --- internal/service/lambda/layer_version.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/lambda/layer_version.go b/internal/service/lambda/layer_version.go index 41578682535f..e145d4cc9ff9 100644 --- a/internal/service/lambda/layer_version.go +++ b/internal/service/lambda/layer_version.go @@ -113,8 +113,9 @@ func ResourceLayerVersion() *schema.Resource { }, "skip_destroy": { Type: schema.TypeBool, - Optional: true, Default: false, + ForceNew: true, + Optional: true, }, "source_code_hash": { Type: schema.TypeString, From ad5d49f366e46ac71a33eae014a4c7103e06c94e Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:46:18 -0500 Subject: [PATCH 290/304] layer_verison: Add test of new arg --- internal/service/lambda/layer_version_test.go | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/internal/service/lambda/layer_version_test.go b/internal/service/lambda/layer_version_test.go index 969cf1592574..d93478a4cf4f 100644 --- a/internal/service/lambda/layer_version_test.go +++ b/internal/service/lambda/layer_version_test.go @@ -249,6 +249,38 @@ func TestAccLambdaLayerVersion_licenseInfo(t *testing.T) { }) } +func TestAccLambdaLayerVersion_skipDestroy(t *testing.T) { + resourceName := "aws_lambda_layer_version.lambda_layer_test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: nil, // this purposely leaves dangling resources, since skip_destroy = true + Steps: []resource.TestStep{ + { + Config: testAccLayerVersionSkipDestroyConfig(rName, "nodejs12.x"), + Check: resource.ComposeTestCheckFunc( + testAccCheckLayerVersionExists(resourceName, rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "lambda", fmt.Sprintf("layer:%s:1", rName)), + resource.TestCheckResourceAttr(resourceName, "compatible_runtimes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "skip_destroy", "true"), + ), + }, + { + Config: testAccLayerVersionSkipDestroyConfig(rName, "nodejs14.x"), + Check: resource.ComposeTestCheckFunc( + testAccCheckLayerVersionExists(resourceName, rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "lambda", fmt.Sprintf("layer:%s:2", rName)), + resource.TestCheckResourceAttr(resourceName, "compatible_runtimes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "skip_destroy", "true"), + ), + }, + }, + }) +} + func testAccCheckLambdaLayerVersionDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn @@ -422,3 +454,14 @@ resource "aws_lambda_layer_version" "lambda_layer_test" { } `, layerName, licenseInfo) } + +func testAccLayerVersionSkipDestroyConfig(rName, compatRuntime string) string { + return fmt.Sprintf(` +resource "aws_lambda_layer_version" "lambda_layer_test" { + filename = "test-fixtures/lambdatest.zip" + layer_name = %[1]q + compatible_runtimes = [%[2]q] + skip_destroy = true +} +`, rName, compatRuntime) +} From 4226a61a1c3e9d8437fa1b6d153eabf156bc2420 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:49:41 -0500 Subject: [PATCH 291/304] layer_version: Remove service from names --- internal/service/lambda/layer_version.go | 6 +++--- internal/service/lambda/layer_version_test.go | 16 
++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/service/lambda/layer_version.go b/internal/service/lambda/layer_version.go index e145d4cc9ff9..60fad2e537bc 100644 --- a/internal/service/lambda/layer_version.go +++ b/internal/service/lambda/layer_version.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" ) -const awsMutexLambdaLayerKey = `aws_lambda_layer_version` +const awsMutexLayerKey = `aws_lambda_layer_version` func ResourceLayerVersion() *schema.Resource { return &schema.Resource{ @@ -150,8 +150,8 @@ func resourceLayerVersionPublish(d *schema.ResourceData, meta interface{}) error var layerContent *lambda.LayerVersionContentInput if hasFilename { - conns.GlobalMutexKV.Lock(awsMutexLambdaLayerKey) - defer conns.GlobalMutexKV.Unlock(awsMutexLambdaLayerKey) + conns.GlobalMutexKV.Lock(awsMutexLayerKey) + defer conns.GlobalMutexKV.Unlock(awsMutexLayerKey) file, err := loadFileContent(filename.(string)) if err != nil { return fmt.Errorf("Unable to load %q: %s", filename.(string), err) diff --git a/internal/service/lambda/layer_version_test.go b/internal/service/lambda/layer_version_test.go index d93478a4cf4f..0c7e6bbf1691 100644 --- a/internal/service/lambda/layer_version_test.go +++ b/internal/service/lambda/layer_version_test.go @@ -24,7 +24,7 @@ func TestAccLambdaLayerVersion_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionBasic(layerName), @@ -60,7 +60,7 @@ func TestAccLambdaLayerVersion_update(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionCreateBeforeDestroy(layerName, "test-fixtures/lambdatest.zip"), @@ -92,7 +92,7 @@ func TestAccLambdaLayerVersion_s3(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionS3(bucketName, layerName), @@ -118,7 +118,7 @@ func TestAccLambdaLayerVersion_compatibleRuntimes(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionCompatibleRuntimes(layerName), @@ -147,7 +147,7 @@ func TestAccLambdaLayerVersion_compatibleArchitectures(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionCompatibleArchitecturesNone(layerName), @@ -199,7 +199,7 @@ func TestAccLambdaLayerVersion_description(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - 
CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionDescription(layerName, testDescription), @@ -229,7 +229,7 @@ func TestAccLambdaLayerVersion_licenseInfo(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, - CheckDestroy: testAccCheckLambdaLayerVersionDestroy, + CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { Config: testAccLayerVersionLicenseInfo(layerName, testLicenseInfo), @@ -281,7 +281,7 @@ func TestAccLambdaLayerVersion_skipDestroy(t *testing.T) { }) } -func testAccCheckLambdaLayerVersionDestroy(s *terraform.State) error { +func testAccCheckLayerVersionDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn for _, rs := range s.RootModule().Resources { From 0e367cc350fe9fd4a4e891709d42c56e6ae745ae Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:50:24 -0500 Subject: [PATCH 292/304] layer_version: Remove aws from name --- internal/service/lambda/layer_version.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/lambda/layer_version.go b/internal/service/lambda/layer_version.go index 60fad2e537bc..9ac16d7477ea 100644 --- a/internal/service/lambda/layer_version.go +++ b/internal/service/lambda/layer_version.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" ) -const awsMutexLayerKey = `aws_lambda_layer_version` +const mutexLayerKey = `aws_lambda_layer_version` func ResourceLayerVersion() *schema.Resource { return &schema.Resource{ @@ -150,8 +150,8 @@ func resourceLayerVersionPublish(d *schema.ResourceData, meta interface{}) error var layerContent *lambda.LayerVersionContentInput if hasFilename { - conns.GlobalMutexKV.Lock(awsMutexLayerKey) - defer conns.GlobalMutexKV.Unlock(awsMutexLayerKey) + conns.GlobalMutexKV.Lock(mutexLayerKey) + defer conns.GlobalMutexKV.Unlock(mutexLayerKey) file, err := loadFileContent(filename.(string)) if err != nil { return fmt.Errorf("Unable to load %q: %s", filename.(string), err) From 41394b030f77c48abd3072e99d464efb5b212a3b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 15:57:47 -0500 Subject: [PATCH 293/304] layer_version: Fix import testing --- internal/service/lambda/layer_version_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/lambda/layer_version_test.go b/internal/service/lambda/layer_version_test.go index 0c7e6bbf1691..79e1081331ec 100644 --- a/internal/service/lambda/layer_version_test.go +++ b/internal/service/lambda/layer_version_test.go @@ -46,7 +46,7 @@ func TestAccLambdaLayerVersion_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"filename"}, + ImportStateVerifyIgnore: []string{"filename", "skip_destroy"}, }, }, }) @@ -71,7 +71,7 @@ func TestAccLambdaLayerVersion_update(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"filename", "source_code_hash"}, + ImportStateVerifyIgnore: []string{"filename", "source_code_hash", "skip_destroy"}, }, { @@ -103,7 +103,7 @@ func TestAccLambdaLayerVersion_s3(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"s3_bucket", "s3_key"}, + 
ImportStateVerifyIgnore: []string{"s3_bucket", "s3_key", "skip_destroy"}, }, }, }) @@ -132,7 +132,7 @@ func TestAccLambdaLayerVersion_compatibleRuntimes(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"filename"}, + ImportStateVerifyIgnore: []string{"filename", "skip_destroy"}, }, }, }) @@ -183,7 +183,7 @@ func TestAccLambdaLayerVersion_compatibleArchitectures(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"filename"}, + ImportStateVerifyIgnore: []string{"filename", "skip_destroy"}, }, }, }) @@ -213,7 +213,7 @@ func TestAccLambdaLayerVersion_description(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"filename"}, + ImportStateVerifyIgnore: []string{"filename", "skip_destroy"}, }, }, }) @@ -243,7 +243,7 @@ func TestAccLambdaLayerVersion_licenseInfo(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"filename"}, + ImportStateVerifyIgnore: []string{"filename", "skip_destroy"}, }, }, }) From d61296715ece6b10594778ba4678347e6e42c4e7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 15:58:09 -0500 Subject: [PATCH 294/304] r/aws_iot_thing_group_membership: Acceptance tests passing. Acceptance test output: % make testacc PKG_NAME=internal/service/iot TESTARGS='-run=TestAccIoTThingGroupMembership_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/iot/... -v -count 1 -parallel 20 -run=TestAccIoTThingGroupMembership_ -timeout 180m === RUN TestAccIoTThingGroupMembership_basic === PAUSE TestAccIoTThingGroupMembership_basic === RUN TestAccIoTThingGroupMembership_disappears === PAUSE TestAccIoTThingGroupMembership_disappears === RUN TestAccIoTThingGroupMembership_disappears_Thing === PAUSE TestAccIoTThingGroupMembership_disappears_Thing === RUN TestAccIoTThingGroupMembership_disappears_ThingGroup === PAUSE TestAccIoTThingGroupMembership_disappears_ThingGroup === RUN TestAccIoTThingGroupMembership_overrideDynamicGroup === PAUSE TestAccIoTThingGroupMembership_overrideDynamicGroup === CONT TestAccIoTThingGroupMembership_basic === CONT TestAccIoTThingGroupMembership_disappears_ThingGroup === CONT TestAccIoTThingGroupMembership_disappears === CONT TestAccIoTThingGroupMembership_disappears_Thing === CONT TestAccIoTThingGroupMembership_overrideDynamicGroup --- PASS: TestAccIoTThingGroupMembership_disappears_ThingGroup (17.77s) --- PASS: TestAccIoTThingGroupMembership_disappears_Thing (18.04s) --- PASS: TestAccIoTThingGroupMembership_disappears (18.12s) --- PASS: TestAccIoTThingGroupMembership_basic (19.60s) --- PASS: TestAccIoTThingGroupMembership_overrideDynamicGroup (20.06s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/iot 23.479s --- internal/service/iot/thing.go | 130 +++++++++--------- internal/service/iot/thing_group.go | 1 + .../iot/thing_group_membership_test.go | 8 +- internal/service/iot/thing_test.go | 27 ++-- 4 files changed, 84 insertions(+), 82 deletions(-) diff --git a/internal/service/iot/thing.go b/internal/service/iot/thing.go index a11986b76290..263f5f8fdefd 100644 --- a/internal/service/iot/thing.go +++ b/internal/service/iot/thing.go @@ -1,6 +1,7 @@ package iot import ( + "fmt" "log" "github.com/aws/aws-sdk-go/aws" @@ -10,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func ResourceThing() *schema.Resource { @@ -24,34 +26,34 @@ func ResourceThing() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 128), + "arn": { + Type: schema.TypeString, + Computed: true, }, "attributes": { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "default_client_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, "thing_type_name": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(1, 128), }, - "default_client_id": { - Type: schema.TypeString, - Computed: true, - }, "version": { Type: schema.TypeInt, Computed: true, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, }, } } @@ -59,26 +61,29 @@ func ResourceThing() *schema.Resource { func resourceThingCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - params := &iot.CreateThingInput{ - ThingName: aws.String(d.Get("name").(string)), + name := d.Get("name").(string) + input := &iot.CreateThingInput{ + ThingName: aws.String(name), } - if v, ok := d.GetOk("thing_type_name"); ok { - params.ThingTypeName = aws.String(v.(string)) - } - if v, ok := d.GetOk("attributes"); ok { - params.AttributePayload = &iot.AttributePayload{ + if v, ok := d.GetOk("attributes"); ok && len(v.(map[string]interface{})) > 0 { + input.AttributePayload = &iot.AttributePayload{ Attributes: flex.ExpandStringMap(v.(map[string]interface{})), } } - log.Printf("[DEBUG] Creating IoT Thing: %s", params) - out, err := conn.CreateThing(params) + if v, ok := d.GetOk("thing_type_name"); ok { + input.ThingTypeName = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Creating IoT Thing: %s", input) + output, err := conn.CreateThing(input) + if err != nil { - return err + return fmt.Errorf("error creating IoT Thing (%s): %w", name, err) } - d.SetId(aws.StringValue(out.ThingName)) + d.SetId(aws.StringValue(output.ThingName)) return resourceThingRead(d, meta) } @@ -86,28 +91,24 @@ func resourceThingCreate(d *schema.ResourceData, meta interface{}) error { func resourceThingRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - params := &iot.DescribeThingInput{ - ThingName: aws.String(d.Id()), + output, err := FindThingByName(conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] IoT Thing (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - log.Printf("[DEBUG] Reading IoT Thing: %s", params) - out, err := conn.DescribeThing(params) if err != nil { - if tfawserr.ErrMessageContains(err, iot.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] IoT Thing %q not found, removing from state", d.Id()) - d.SetId("") - } - return err + return fmt.Errorf("error reading IoT Thing (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] Received IoT Thing: %s", out) - - d.Set("arn", out.ThingArn) - d.Set("name", out.ThingName) - d.Set("attributes", aws.StringValueMap(out.Attributes)) - d.Set("default_client_id", out.DefaultClientId) - d.Set("thing_type_name", out.ThingTypeName) - d.Set("version", 
out.Version) + d.Set("arn", output.ThingArn) + d.Set("default_client_id", output.DefaultClientId) + d.Set("name", output.ThingName) + d.Set("attributes", aws.StringValueMap(output.Attributes)) + d.Set("thing_type_name", output.ThingTypeName) + d.Set("version", output.Version) return nil } @@ -115,32 +116,35 @@ func resourceThingRead(d *schema.ResourceData, meta interface{}) error { func resourceThingUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - params := &iot.UpdateThingInput{ + input := &iot.UpdateThingInput{ ThingName: aws.String(d.Get("name").(string)), } - if d.HasChange("thing_type_name") { - if v, ok := d.GetOk("thing_type_name"); ok { - params.ThingTypeName = aws.String(v.(string)) - } else { - params.RemoveThingType = aws.Bool(true) - } - } + if d.HasChange("attributes") { attributes := map[string]*string{} - if v, ok := d.GetOk("attributes"); ok { - if m, ok := v.(map[string]interface{}); ok { - attributes = flex.ExpandStringMap(m) - } + if v, ok := d.GetOk("attributes"); ok && len(v.(map[string]interface{})) > 0 { + attributes = flex.ExpandStringMap(v.(map[string]interface{})) } - params.AttributePayload = &iot.AttributePayload{ + + input.AttributePayload = &iot.AttributePayload{ Attributes: attributes, } } - _, err := conn.UpdateThing(params) + if d.HasChange("thing_type_name") { + if v, ok := d.GetOk("thing_type_name"); ok { + input.ThingTypeName = aws.String(v.(string)) + } else { + input.RemoveThingType = aws.Bool(true) + } + } + + log.Printf("[DEBUG] Updating IoT Thing: %s", input) + _, err := conn.UpdateThing(input) + if err != nil { - return err + return fmt.Errorf("error updating IoT Thing (%s): %w", d.Id(), err) } return resourceThingRead(d, meta) @@ -149,17 +153,17 @@ func resourceThingUpdate(d *schema.ResourceData, meta interface{}) error { func resourceThingDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).IoTConn - params := &iot.DeleteThingInput{ + log.Printf("[DEBUG] Deleting IoT Thing: %s", d.Id()) + _, err := conn.DeleteThing(&iot.DeleteThingInput{ ThingName: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + return nil } - log.Printf("[DEBUG] Deleting IoT Thing: %s", params) - _, err := conn.DeleteThing(params) if err != nil { - if tfawserr.ErrMessageContains(err, iot.ErrCodeResourceNotFoundException, "") { - return nil - } - return err + return fmt.Errorf("error deleting IoT Thing (%s): %w", d.Id(), err) } return nil diff --git a/internal/service/iot/thing_group.go b/internal/service/iot/thing_group.go index 8b07a9f96467..277eba5981e4 100644 --- a/internal/service/iot/thing_group.go +++ b/internal/service/iot/thing_group.go @@ -238,6 +238,7 @@ func resourceThingGroupUpdate(d *schema.ResourceData, meta interface{}) error { } } + log.Printf("[DEBUG] Updating IoT Thing Group: %s", input) _, err := conn.UpdateThingGroup(input) if err != nil { diff --git a/internal/service/iot/thing_group_membership_test.go b/internal/service/iot/thing_group_membership_test.go index 273316ce43c1..709d244fbb54 100644 --- a/internal/service/iot/thing_group_membership_test.go +++ b/internal/service/iot/thing_group_membership_test.go @@ -135,10 +135,10 @@ func TestAccIoTThingGroupMembership_overrideDynamicGroup(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - // override_dynamic_group + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"override_dynamic_group"}, }, }, }) diff --git a/internal/service/iot/thing_test.go b/internal/service/iot/thing_test.go index 0f84f7a854f0..2cef0df6adbc 100644 --- a/internal/service/iot/thing_test.go +++ b/internal/service/iot/thing_test.go @@ -4,14 +4,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfiot "github.com/hashicorp/terraform-provider-aws/internal/service/iot" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccIoTThing_basic(t *testing.T) { @@ -111,7 +111,7 @@ func TestAccIoTThing_full(t *testing.T) { }) } -func testAccCheckIotThingExists(n string, thing *iot.DescribeThingOutput) resource.TestCheckFunc { +func testAccCheckIotThingExists(n string, v *iot.DescribeThingOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -123,15 +123,14 @@ func testAccCheckIotThingExists(n string, thing *iot.DescribeThingOutput) resour } conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn - params := &iot.DescribeThingInput{ - ThingName: aws.String(rs.Primary.ID), - } - resp, err := conn.DescribeThing(params) + + output, err := tfiot.FindThingByName(conn, rs.Primary.ID) + if err != nil { return err } - *thing = *resp + *v = *output return nil } @@ -145,19 +144,17 @@ func testAccCheckThingDestroy(s *terraform.State) error { continue } - params := &iot.DescribeThingInput{ - ThingName: aws.String(rs.Primary.ID), + _, err := tfiot.FindThingByName(conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue } - _, err := conn.DescribeThing(params) if err != nil { - if tfawserr.ErrMessageContains(err, iot.ErrCodeResourceNotFoundException, "") { - return nil - } return err } - return fmt.Errorf("Expected IoT Thing to be destroyed, %s found", rs.Primary.ID) + return fmt.Errorf("IoT Thing %s still exists", rs.Primary.ID) } return nil From ceeff5473ca9d5849985a83d0ffb40d117aecb98 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 16:08:58 -0500 Subject: [PATCH 295/304] tests/layer_version: Standardize --- internal/service/lambda/layer_version_test.go | 140 +++++++++--------- 1 file changed, 67 insertions(+), 73 deletions(-) diff --git a/internal/service/lambda/layer_version_test.go b/internal/service/lambda/layer_version_test.go index 79e1081331ec..d5dcb9c8755c 100644 --- a/internal/service/lambda/layer_version_test.go +++ b/internal/service/lambda/layer_version_test.go @@ -18,7 +18,7 @@ import ( func TestAccLambdaLayerVersion_basic(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - layerName := fmt.Sprintf("tf_acc_lambda_layer_basic_%s", sdkacctest.RandString(8)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -27,15 +27,15 @@ func TestAccLambdaLayerVersion_basic(t *testing.T) { CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionBasic(layerName), + Config: testAccLayerVersionBasic(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckLayerVersionExists(resourceName, layerName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "lambda", fmt.Sprintf("layer:%s:1", layerName)), + testAccCheckLayerVersionExists(resourceName, rName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "lambda", fmt.Sprintf("layer:%s:1", rName)), resource.TestCheckResourceAttr(resourceName, "compatible_runtimes.#", "0"), resource.TestCheckResourceAttr(resourceName, "description", ""), - resource.TestCheckResourceAttr(resourceName, "layer_name", layerName), + resource.TestCheckResourceAttr(resourceName, "layer_name", rName), resource.TestCheckResourceAttr(resourceName, "license_info", ""), - acctest.CheckResourceAttrRegionalARN(resourceName, "layer_arn", "lambda", fmt.Sprintf("layer:%s", layerName)), + acctest.CheckResourceAttrRegionalARN(resourceName, "layer_arn", "lambda", fmt.Sprintf("layer:%s", rName)), resource.TestCheckResourceAttr(resourceName, "version", "1"), resource.TestCheckResourceAttr(resourceName, "signing_profile_version_arn", ""), resource.TestCheckResourceAttr(resourceName, "signing_job_arn", ""), @@ -54,7 +54,7 @@ func TestAccLambdaLayerVersion_basic(t *testing.T) { func TestAccLambdaLayerVersion_update(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - layerName := fmt.Sprintf("tf_acc_lambda_layer_basic_%s", sdkacctest.RandString(8)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -63,8 +63,8 @@ func TestAccLambdaLayerVersion_update(t *testing.T) { CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionCreateBeforeDestroy(layerName, "test-fixtures/lambdatest.zip"), - Check: testAccCheckLayerVersionExists(resourceName, layerName), + Config: testAccLayerVersionCreateBeforeDestroy(rName, "test-fixtures/lambdatest.zip"), + Check: testAccCheckLayerVersionExists(resourceName, rName), }, { @@ -75,8 +75,8 @@ func TestAccLambdaLayerVersion_update(t *testing.T) { }, { - Config: testAccLayerVersionCreateBeforeDestroy(layerName, "test-fixtures/lambdatest_modified.zip"), - Check: testAccCheckLayerVersionExists(resourceName, layerName), + Config: testAccLayerVersionCreateBeforeDestroy(rName, "test-fixtures/lambdatest_modified.zip"), + Check: testAccCheckLayerVersionExists(resourceName, rName), }, }, }) @@ -84,9 +84,7 @@ func TestAccLambdaLayerVersion_update(t *testing.T) { func TestAccLambdaLayerVersion_s3(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - rString := sdkacctest.RandString(8) - layerName := fmt.Sprintf("tf_acc_lambda_layer_s3_%s", rString) - bucketName := fmt.Sprintf("tf-acc-bucket-lambda-layer-s3-%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -95,8 +93,8 @@ func TestAccLambdaLayerVersion_s3(t *testing.T) { CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionS3(bucketName, layerName), - Check: testAccCheckLayerVersionExists(resourceName, layerName), + Config: testAccLayerVersionS3(rName), + Check: testAccCheckLayerVersionExists(resourceName, rName), }, { @@ -111,8 +109,7 @@ func TestAccLambdaLayerVersion_s3(t *testing.T) { func TestAccLambdaLayerVersion_compatibleRuntimes(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - rString := sdkacctest.RandString(8) - layerName := 
fmt.Sprintf("tf_acc_lambda_layer_runtimes_%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -121,9 +118,9 @@ func TestAccLambdaLayerVersion_compatibleRuntimes(t *testing.T) { CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionCompatibleRuntimes(layerName), + Config: testAccLayerVersionCompatibleRuntimes(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "compatible_runtimes.#", "2"), ), }, @@ -140,8 +137,7 @@ func TestAccLambdaLayerVersion_compatibleRuntimes(t *testing.T) { func TestAccLambdaLayerVersion_compatibleArchitectures(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - rString := sdkacctest.RandString(8) - layerName := fmt.Sprintf("tf_acc_lambda_layer_architectures_%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -150,31 +146,31 @@ func TestAccLambdaLayerVersion_compatibleArchitectures(t *testing.T) { CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionCompatibleArchitecturesNone(layerName), + Config: testAccLayerVersionCompatibleArchitecturesNone(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "compatible_architectures.#", "0"), ), }, { - Config: testAccLayerVersionCompatibleArchitecturesX86(layerName), + Config: testAccLayerVersionCompatibleArchitecturesX86(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "compatible_architectures.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "compatible_architectures.*", lambda.ArchitectureX8664), ), }, { - Config: testAccLayerVersionCompatibleArchitecturesArm(layerName), + Config: testAccLayerVersionCompatibleArchitecturesArm(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "compatible_architectures.#", "1"), ), }, { - Config: testAccLayerVersionCompatibleArchitecturesX86Arm(layerName), + Config: testAccLayerVersionCompatibleArchitecturesX86Arm(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "compatible_architectures.#", "2"), ), }, @@ -191,8 +187,7 @@ func TestAccLambdaLayerVersion_compatibleArchitectures(t *testing.T) { func TestAccLambdaLayerVersion_description(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - rString := sdkacctest.RandString(8) - layerName := fmt.Sprintf("tf_acc_lambda_layer_description_%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) testDescription := "test description" resource.ParallelTest(t, resource.TestCase{ @@ -202,9 +197,9 @@ func TestAccLambdaLayerVersion_description(t *testing.T) { CheckDestroy: 
testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionDescription(layerName, testDescription), + Config: testAccLayerVersionDescription(rName, testDescription), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "description", testDescription), ), }, @@ -221,8 +216,7 @@ func TestAccLambdaLayerVersion_description(t *testing.T) { func TestAccLambdaLayerVersion_licenseInfo(t *testing.T) { resourceName := "aws_lambda_layer_version.lambda_layer_test" - rString := sdkacctest.RandString(8) - layerName := fmt.Sprintf("tf_acc_lambda_layer_license_info_%s", rString) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) testLicenseInfo := "MIT" resource.ParallelTest(t, resource.TestCase{ @@ -232,9 +226,9 @@ func TestAccLambdaLayerVersion_licenseInfo(t *testing.T) { CheckDestroy: testAccCheckLayerVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccLayerVersionLicenseInfo(layerName, testLicenseInfo), + Config: testAccLayerVersionLicenseInfo(rName, testLicenseInfo), Check: resource.ComposeTestCheckFunc( - testAccCheckLayerVersionExists(resourceName, layerName), + testAccCheckLayerVersionExists(resourceName, rName), resource.TestCheckResourceAttr(resourceName, "license_info", testLicenseInfo), ), }, @@ -340,19 +334,19 @@ func testAccCheckLayerVersionExists(res, layerName string) resource.TestCheckFun } } -func testAccLayerVersionBasic(layerName string) string { +func testAccLayerVersionBasic(rName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q } -`, layerName) +`, rName) } -func testAccLayerVersionS3(bucketName, layerName string) string { +func testAccLayerVersionS3(rName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "lambda_bucket" { - bucket = "%s" + bucket = %[1]q } resource "aws_s3_bucket_object" "lambda_code" { @@ -364,95 +358,95 @@ resource "aws_s3_bucket_object" "lambda_code" { resource "aws_lambda_layer_version" "lambda_layer_test" { s3_bucket = aws_s3_bucket.lambda_bucket.id s3_key = aws_s3_bucket_object.lambda_code.id - layer_name = "%s" + layer_name = %[1]q } -`, bucketName, layerName) +`, rName) } -func testAccLayerVersionCreateBeforeDestroy(layerName string, filename string) string { +func testAccLayerVersionCreateBeforeDestroy(rName string, filename string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { - filename = "%s" - layer_name = "%s" - source_code_hash = filebase64sha256("%s") + filename = %[1]q + layer_name = %[2]q + source_code_hash = filebase64sha256(%[1]q) lifecycle { create_before_destroy = true } } -`, filename, layerName, filename) +`, filename, rName) } -func testAccLayerVersionCompatibleRuntimes(layerName string) string { +func testAccLayerVersionCompatibleRuntimes(rName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q compatible_runtimes = ["nodejs12.x", "nodejs10.x"] } -`, layerName) +`, rName) } -func testAccLayerVersionCompatibleArchitecturesNone(layerName string) string { +func testAccLayerVersionCompatibleArchitecturesNone(rName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = 
"test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q } -`, layerName) +`, rName) } -func testAccLayerVersionCompatibleArchitecturesX86Arm(layerName string) string { +func testAccLayerVersionCompatibleArchitecturesX86Arm(rName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q compatible_architectures = ["x86_64", "arm64"] } -`, layerName) +`, rName) } -func testAccLayerVersionCompatibleArchitecturesX86(layerName string) string { +func testAccLayerVersionCompatibleArchitecturesX86(rName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q compatible_architectures = ["x86_64"] } -`, layerName) +`, rName) } -func testAccLayerVersionCompatibleArchitecturesArm(layerName string) string { +func testAccLayerVersionCompatibleArchitecturesArm(rName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q compatible_architectures = ["arm64"] } -`, layerName) +`, rName) } -func testAccLayerVersionDescription(layerName string, description string) string { +func testAccLayerVersionDescription(rName string, description string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q - description = "%s" + description = %[2]q } -`, layerName, description) +`, rName, description) } -func testAccLayerVersionLicenseInfo(layerName string, licenseInfo string) string { +func testAccLayerVersionLicenseInfo(rName string, licenseInfo string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "lambda_layer_test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q - license_info = "%s" + license_info = %[2]q } -`, layerName, licenseInfo) +`, rName, licenseInfo) } func testAccLayerVersionSkipDestroyConfig(rName, compatRuntime string) string { From b4d325a44eabc04cae2e4e47948fd3dfb75cfd0b Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 18 Nov 2021 16:15:42 -0500 Subject: [PATCH 296/304] CR updates --- .../lambda/layer_version_permission.go | 172 ++++++++++-------- .../lambda/layer_version_permission_test.go | 139 +++++++++----- internal/service/lambda/sweep.go | 48 ----- ...bda_layer_version_permission.html.markdown | 48 +++-- 4 files changed, 210 insertions(+), 197 deletions(-) diff --git a/internal/service/lambda/layer_version_permission.go b/internal/service/lambda/layer_version_permission.go index f816da36df1a..5f858a18830c 100644 --- a/internal/service/lambda/layer_version_permission.go +++ b/internal/service/lambda/layer_version_permission.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "reflect" + "regexp" "strconv" "strings" @@ -13,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/lambda" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/service/iam" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -29,13 +31,16 @@ func ResourceLayerVersionPermission() *schema.Resource { }, Schema: 
map[string]*schema.Schema{ - "layer_arn": { - Type: schema.TypeString, - ValidateFunc: verify.ValidARN, - Required: true, - ForceNew: true, + "layer_name": { + Type: schema.TypeString, + ValidateFunc: validation.Any( + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-_]+$`), ""), + verify.ValidARN, + ), + Required: true, + ForceNew: true, }, - "layer_version": { + "version_number": { Type: schema.TypeInt, Required: true, ForceNew: true, @@ -75,25 +80,27 @@ func ResourceLayerVersionPermission() *schema.Resource { func resourceLayerVersionPermissionCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).LambdaConn + layerName := d.Get("layer_name").(string) + versionNumber := d.Get("version_number").(int) + params := &lambda.AddLayerVersionPermissionInput{ - LayerName: aws.String(d.Get("layer_arn").(string)), - VersionNumber: aws.Int64(int64(d.Get("layer_version").(int))), + LayerName: aws.String(layerName), + VersionNumber: aws.Int64(int64(versionNumber)), Action: aws.String(d.Get("action").(string)), Principal: aws.String(d.Get("principal").(string)), StatementId: aws.String(d.Get("statement_id").(string)), } - if d.Get("organization_id").(string) != "" { - params.OrganizationId = aws.String(d.Get("organization_id").(string)) + if v, ok := d.GetOk("organization_id"); ok { + params.OrganizationId = aws.String(v.(string)) } - log.Printf("[DEBUG] Adding Lambda layer permissions: %s", params) _, err := conn.AddLayerVersionPermission(params) if err != nil { - return fmt.Errorf("Error adding lambda layer permissions: %s", err) + return fmt.Errorf("error adding Lambda Layer Version Permission (layer: %s, version: %d): %w", layerName, versionNumber, err) } - d.SetId(fmt.Sprintf("%s:%s", *params.LayerName, strconv.FormatInt(*params.VersionNumber, 10))) + d.SetId(fmt.Sprintf("%s,%d", layerName, versionNumber)) return resourceLayerVersionPermissionRead(d, meta) } @@ -101,24 +108,26 @@ func resourceLayerVersionPermissionCreate(d *schema.ResourceData, meta interface func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).LambdaConn - layerName, layerArn, version, err := ResourceLayerVersionPermissionParseId(d.Id()) + layerName, versionNumber, err := ResourceLayerVersionPermissionParseId(d.Id()) if err != nil { - return fmt.Errorf("Error parsing lambda layer ID: %s", err) + return err } - layerVersionPolicyOutput, err := conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{ + input := &lambda.GetLayerVersionPolicyInput{ LayerName: aws.String(layerName), - VersionNumber: aws.Int64(version), - }) + VersionNumber: aws.Int64(versionNumber), + } + + layerVersionPolicyOutput, err := conn.GetLayerVersionPolicy(input) if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] Lambda Layer Version (%s) not found, removing it's permission from state", d.Id()) + log.Printf("[WARN] Lambda Layer Version Permission (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return fmt.Errorf("error reading Lambda Layer version permission (%s): %s", d.Id(), err) + return fmt.Errorf("error reading Lambda Layer Version Permission (%s): %w", d.Id(), err) } policyDoc := &iam.IAMPolicyDoc{} @@ -127,43 +136,55 @@ func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{} return err } - principal := "" - identifiers := policyDoc.Statements[0].Principals[0].Identifiers - if reflect.TypeOf(identifiers).String() == "[]string" && 
identifiers.([]string)[0] == "*" { - principal = "*" - } else { - policy_principal_arn, err := arn.Parse(policyDoc.Statements[0].Principals[0].Identifiers.(string)) - if err != nil { - return fmt.Errorf("error reading principal arn from Lambda Layer version permission (%s): %s", d.Id(), err) + d.Set("layer_name", layerName) + d.Set("version_number", versionNumber) + d.Set("policy", layerVersionPolicyOutput.Policy) + d.Set("revision_id", layerVersionPolicyOutput.RevisionId) + + if policyDoc != nil && len(policyDoc.Statements) > 0 { + d.Set("statement_id", policyDoc.Statements[0].Sid) + + if actions := policyDoc.Statements[0].Actions; actions != nil { + var action string + t := reflect.TypeOf(actions) + if t.String() == "[]string" && len(actions.([]string)) > 0 { + action = actions.([]string)[0] + } else if t.String() == "string" { + action = actions.(string) + } + + d.Set("action", action) } - principal = policy_principal_arn.AccountID - } - if err := d.Set("layer_arn", layerArn); err != nil { - return fmt.Errorf("Error setting lambda layer permission layer_arn: %s", err) - } - if err := d.Set("layer_version", version); err != nil { - return fmt.Errorf("Error setting lambda layer permission layer_version: %s", err) - } - if err := d.Set("statement_id", policyDoc.Statements[0].Sid); err != nil { - return fmt.Errorf("Error setting lambda layer permission statement_id: %s", err) - } - if err := d.Set("action", policyDoc.Statements[0].Actions); err != nil { - return fmt.Errorf("Error setting lambda layer permission action: %s", err) - } - if err := d.Set("principal", principal); err != nil { - return fmt.Errorf("Error setting lambda layer permission statement_id: %s", err) - } - if len(policyDoc.Statements[0].Conditions) > 0 { - if err := d.Set("organization_id", policyDoc.Statements[0].Conditions[0].Values.([]string)[0]); err != nil { - return fmt.Errorf("Error setting lambda layer permission organization_id: %s", err) + if len(policyDoc.Statements[0].Conditions) > 0 && policyDoc.Statements[0].Conditions[0].Values != nil { + var organizationId string + values := policyDoc.Statements[0].Conditions[0].Values + t := reflect.TypeOf(values) + if t.String() == "[]string" && len(values.([]string)) > 0 { + organizationId = values.([]string)[0] + } else if t.String() == "string" { + organizationId = values.(string) + } + + d.Set("organization_id", organizationId) + } + + if len(policyDoc.Statements[0].Principals) > 0 && policyDoc.Statements[0].Principals[0].Identifiers != nil { + var principal string + identifiers := policyDoc.Statements[0].Principals[0].Identifiers + t := reflect.TypeOf(identifiers) + if t.String() == "[]string" && len(identifiers.([]string)) > 0 && identifiers.([]string)[0] == "*" { + principal = "*" + } else if t.String() == "string" { + policyPrincipalArn, err := arn.Parse(identifiers.(string)) + if err != nil { + return fmt.Errorf("error reading Principal ARN from Lambda Layer Version Permission (%s): %w", d.Id(), err) + } + principal = policyPrincipalArn.AccountID + } + + d.Set("principal", principal) } - } - if err := d.Set("policy", layerVersionPolicyOutput.Policy); err != nil { - return fmt.Errorf("Error setting lambda layer permission policy: %s", err) - } - if err := d.Set("revision_id", layerVersionPolicyOutput.RevisionId); err != nil { - return fmt.Errorf("Error setting lambda layer permission revision_id: %s", err) } return nil @@ -172,37 +193,42 @@ func resourceLayerVersionPermissionRead(d *schema.ResourceData, meta interface{} func resourceLayerVersionPermissionDelete(d 
*schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).LambdaConn - layerName, _, version, err := ResourceLayerVersionPermissionParseId(d.Id()) + layerName, versionNumber, err := ResourceLayerVersionPermissionParseId(d.Id()) if err != nil { - return fmt.Errorf("Error parsing lambda layer ID: %s", err) + return err } - _, err = conn.RemoveLayerVersionPermission(&lambda.RemoveLayerVersionPermissionInput{ + input := &lambda.RemoveLayerVersionPermissionInput{ LayerName: aws.String(layerName), - VersionNumber: aws.Int64(version), + VersionNumber: aws.Int64(versionNumber), StatementId: aws.String(d.Get("statement_id").(string)), - }) + } + + _, err = conn.RemoveLayerVersionPermission(input) + + if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { + return nil + } + if err != nil { - return fmt.Errorf("error deleting Lambda Layer Version permission (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting Lambda Layer Version Permission (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] Lambda layer permission %q deleted", d.Get("statement_id").(string)) return nil } -func ResourceLayerVersionPermissionParseId(id string) (layerName string, layerARN string, version int64, err error) { - arn, err := arn.Parse(id) - if err != nil { - return +func ResourceLayerVersionPermissionParseId(id string) (string, int64, error) { + parts := strings.Split(id, ",") + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", 0, fmt.Errorf("unexpected format of ID (%s), expected LAYER_NAME,VERSION_NUMBER or LAYER_ARN,VERSION_NUMBER", id) } - parts := strings.Split(arn.Resource, ":") - if len(parts) != 3 || parts[0] != "layer" { - err = fmt.Errorf("lambda_layer ID must be a valid Layer ARN") - return + + layerName := parts[0] + versionNum, err := strconv.ParseInt(parts[1], 10, 64) + + if err != nil { + return "", 0, err } - layerName = parts[1] - layerARN = strings.TrimSuffix(id, ":"+parts[2]) - version, err = strconv.ParseInt(parts[2], 10, 64) - return + return layerName, versionNum, nil } diff --git a/internal/service/lambda/layer_version_permission_test.go b/internal/service/lambda/layer_version_permission_test.go index 6d5ed77d5ebc..8fe43492ccf9 100644 --- a/internal/service/lambda/layer_version_permission_test.go +++ b/internal/service/lambda/layer_version_permission_test.go @@ -15,26 +15,55 @@ import ( tflambda "github.com/hashicorp/terraform-provider-aws/internal/service/lambda" ) -func TestLambdaLayerVersionPermission_all(t *testing.T) { +func TestAccLambdaLayerVersionPermission_basic_byARN(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { - Config: testLayerVersionPermission_all(rName), + Config: testLayerVersionPermission_basic_arn(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), - resource.TestCheckResourceAttrPair(resourceName, "layer_arn", 
"aws_lambda_layer_version.test", "layer_arn"), + resource.TestCheckResourceAttrPair(resourceName, "layer_name", "aws_lambda_layer_version.test", "layer_arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccLambdaLayerVersionPermission_basic_byName(t *testing.T) { + resourceName := "aws_lambda_layer_version_permission.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, + Steps: []resource.TestStep{ + { + Config: testLayerVersionPermission_basic_name(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), + resource.TestCheckResourceAttr(resourceName, "principal", "*"), + resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), + resource.TestCheckResourceAttrPair(resourceName, "layer_name", "aws_lambda_layer_version.test", "layer_name"), + ), + }, { ResourceName: resourceName, ImportState: true, @@ -44,27 +73,27 @@ func TestLambdaLayerVersionPermission_all(t *testing.T) { }) } -func TestLambdaLayerVersionPermission_org(t *testing.T) { +func TestAccLambdaLayerVersionPermission_org(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { Config: testLayerVersionPermission_org(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName), resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), resource.TestCheckResourceAttr(resourceName, "principal", "*"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), resource.TestCheckResourceAttr(resourceName, "organization_id", "o-0123456789"), - resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.test", "layer_arn"), + resource.TestCheckResourceAttrPair(resourceName, "layer_name", "aws_lambda_layer_version.test", "layer_arn"), ), }, - { ResourceName: resourceName, ImportState: true, @@ -74,26 +103,26 @@ func TestLambdaLayerVersionPermission_org(t *testing.T) { }) } -func TestLambdaLayerVersionPermission_account(t *testing.T) { +func TestAccLambdaLayerVersionPermission_account(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { Config: testLayerVersionPermission_account(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName), 
resource.TestCheckResourceAttr(resourceName, "action", "lambda:GetLayerVersion"), - resource.TestCheckResourceAttr(resourceName, "principal", "456789820214"), + resource.TestCheckResourceAttrPair(resourceName, "principal", "data.aws_caller_identity.current", "account_id"), resource.TestCheckResourceAttr(resourceName, "statement_id", "xaccount"), - resource.TestCheckResourceAttrPair(resourceName, "layer_arn", "aws_lambda_layer_version.test", "layer_arn"), + resource.TestCheckResourceAttrPair(resourceName, "layer_name", "aws_lambda_layer_version.test", "layer_arn"), ), }, - { ResourceName: resourceName, ImportState: true, @@ -103,19 +132,20 @@ func TestLambdaLayerVersionPermission_account(t *testing.T) { }) } -func TestLambdaLayerVersionPermission_disappears(t *testing.T) { +func TestAccLambdaLayerVersionPermission_disappears(t *testing.T) { resourceName := "aws_lambda_layer_version_permission.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), Providers: acctest.Providers, CheckDestroy: testAccCheckLambdaLayerVersionPermissionDestroy, Steps: []resource.TestStep{ { Config: testLayerVersionPermission_account(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName, rName), + testAccCheckAwsLambdaLayerVersionPermissionExists(resourceName), acctest.CheckResourceDisappears(acctest.Provider, tflambda.ResourceLayerVersionPermission(), resourceName), ), ExpectNonEmptyPlan: true, @@ -126,19 +156,36 @@ func TestLambdaLayerVersionPermission_disappears(t *testing.T) { // Creating Lambda layer and Lambda layer permissions -func testLayerVersionPermission_all(layerName string) string { +func testLayerVersionPermission_basic_arn(layerName string) string { return fmt.Sprintf(` resource "aws_lambda_layer_version" "test" { filename = "test-fixtures/lambdatest.zip" - layer_name = "%s" + layer_name = %[1]q } resource "aws_lambda_layer_version_permission" "test" { - layer_arn = aws_lambda_layer_version.test.layer_arn - layer_version = aws_lambda_layer_version.test.version - action = "lambda:GetLayerVersion" - statement_id = "xaccount" - principal = "*" + layer_name = aws_lambda_layer_version.test.layer_arn + version_number = aws_lambda_layer_version.test.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "*" +} +`, layerName) +} + +func testLayerVersionPermission_basic_name(layerName string) string { + return fmt.Sprintf(` +resource "aws_lambda_layer_version" "test" { + filename = "test-fixtures/lambdatest.zip" + layer_name = %[1]q +} + +resource "aws_lambda_layer_version_permission" "test" { + layer_name = aws_lambda_layer_version.test.layer_name + version_number = aws_lambda_layer_version.test.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = "*" } `, layerName) } @@ -151,8 +198,8 @@ resource "aws_lambda_layer_version" "test" { } resource "aws_lambda_layer_version_permission" "test" { - layer_arn = aws_lambda_layer_version.test.layer_arn - layer_version = aws_lambda_layer_version.test.version + layer_name = aws_lambda_layer_version.test.layer_arn + version_number = aws_lambda_layer_version.test.version action = "lambda:GetLayerVersion" statement_id = "xaccount" principal = "*" @@ -163,53 +210,51 @@ resource "aws_lambda_layer_version_permission" "test" { func testLayerVersionPermission_account(layerName string) string { return 
fmt.Sprintf(` +data "aws_caller_identity" "current" {} + resource "aws_lambda_layer_version" "test" { filename = "test-fixtures/lambdatest.zip" layer_name = "%s" } resource "aws_lambda_layer_version_permission" "test" { - layer_arn = aws_lambda_layer_version.test.layer_arn - layer_version = aws_lambda_layer_version.test.version - action = "lambda:GetLayerVersion" - statement_id = "xaccount" - principal = "456789820214" + layer_name = aws_lambda_layer_version.test.layer_arn + version_number = aws_lambda_layer_version.test.version + action = "lambda:GetLayerVersion" + statement_id = "xaccount" + principal = data.aws_caller_identity.current.account_id } `, layerName) } -func testAccCheckAwsLambdaLayerVersionPermissionExists(res, layerName string) resource.TestCheckFunc { +func testAccCheckAwsLambdaLayerVersionPermissionExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[res] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Lambda Layer version permission not found: %s", res) + return fmt.Errorf("Not found: %s", n) } if rs.Primary.ID == "" { return fmt.Errorf("Lambda Layer version policy ID not set") } - if rs.Primary.Attributes["revision_id"] == "" { - return fmt.Errorf("Lambda Layer Version Permission not set") - } - - _, _, version, err := tflambda.ResourceLayerVersionPermissionParseId(rs.Primary.Attributes["id"]) + layerName, versionNumber, err := tflambda.ResourceLayerVersionPermissionParseId(rs.Primary.ID) if err != nil { - return fmt.Errorf("Error parsing lambda layer ID: %s", err) + return fmt.Errorf("error parsing lambda layer ID: %w", err) } conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn _, err = conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{ LayerName: aws.String(layerName), - VersionNumber: aws.Int64(version), + VersionNumber: aws.Int64(versionNumber), }) - if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { + if err != nil { return err } - return err + return nil } } @@ -221,29 +266,23 @@ func testAccCheckLambdaLayerVersionPermissionDestroy(s *terraform.State) error { continue } - layerName, _, version, err := tflambda.ResourceLayerVersionPermissionParseId(rs.Primary.ID) + layerName, versionNumber, err := tflambda.ResourceLayerVersionPermissionParseId(rs.Primary.ID) if err != nil { return err } _, err = conn.GetLayerVersionPolicy(&lambda.GetLayerVersionPolicyInput{ LayerName: aws.String(layerName), - VersionNumber: aws.Int64(version), + VersionNumber: aws.Int64(versionNumber), }) if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { continue } + if err != nil { return err } - // as I've created Lambda layer, not only layer permission, need to check if layer was destroyed. 
-		err = testAccCheckLambdaLayerVersionDestroy(s)
-		if err != nil {
-			return err
-		}
-
 		return fmt.Errorf("Lambda Layer Version Permission (%s) still exists", rs.Primary.ID)
 	}
 
 	return nil
 }
diff --git a/internal/service/lambda/sweep.go b/internal/service/lambda/sweep.go
index af8db1a63f80..a7681a5b928f 100644
--- a/internal/service/lambda/sweep.go
+++ b/internal/service/lambda/sweep.go
@@ -23,11 +23,6 @@ func init() {
 		Name: "aws_lambda_layer",
 		F:    sweepLayerVersions,
 	})
-
-	resource.AddTestSweepers("aws_lambda_layer_version_permission", &resource.Sweeper{
-		Name: "aws_lambda_layer_version_permission",
-		F:    sweepLayerVersionPermissions,
-	})
 }
 
 func sweepFunctions(region string) error {
@@ -107,46 +102,3 @@ func sweepLayerVersions(region string) error {
 
 	return nil
 }
-
-func sweepLayerVersionPermissions(region string) error {
-	client, err := sweep.SharedRegionalSweepClient(region)
-	if err != nil {
-		return fmt.Errorf("error getting client: %s", err)
-	}
-
-	lambdaconn := client.(*AWSClient).lambdaconn
-	resp, err := lambdaconn.ListLayers(&lambda.ListLayersInput{})
-	if err != nil {
-		if testSweepSkipSweepError(err) {
-			log.Printf("[WARN] Skipping Lambda Layer sweep for %s: %s", region, err)
-			return nil
-		}
-		return fmt.Errorf("Error retrieving Lambda layers: %s", err)
-	}
-
-	if len(resp.Layers) == 0 {
-		log.Print("[DEBUG] No aws lambda layers to sweep")
-		return nil
-	}
-
-	for _, l := range resp.Layers {
-		versionResp, err := lambdaconn.ListLayerVersions(&lambda.ListLayerVersionsInput{
-			LayerName: l.LayerName,
-		})
-		if err != nil {
-			return fmt.Errorf("Error retrieving versions for lambda layer: %s", err)
-		}
-
-		for _, v := range versionResp.LayerVersions {
-			_, err := lambdaconn.DeleteLayerVersion(&lambda.DeleteLayerVersionInput{
-				LayerName:     l.LayerName,
-				VersionNumber: v.Version,
-			})
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/website/docs/r/lambda_layer_version_permission.html.markdown b/website/docs/r/lambda_layer_version_permission.html.markdown
index 53772d8c6d9a..94211f1709e9 100644
--- a/website/docs/r/lambda_layer_version_permission.html.markdown
+++ b/website/docs/r/lambda_layer_version_permission.html.markdown
@@ -3,7 +3,7 @@ subcategory: "Lambda"
 layout: "aws"
 page_title: "AWS: aws_lambda_layer_version_permission"
 description: |-
-  Provides a Lambda Layer Version Permission resource. It allows you to share you own Lambda Layers to another account by account ID, to all accounts in AWS organization or even to all AWS accounts.
+  Provides a Lambda Layer Version Permission resource.
 ---
 
 # Resource: aws_lambda_layer_version_permission
@@ -14,45 +14,41 @@ For information about Lambda Layer Permissions and how to use them, see [Using R
 
 ## Example Usage
 
-```hcl
+```terraform
 resource "aws_lambda_layer_version_permission" "lambda_layer_permission" {
-  layer_arn     = "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1"
-  layer_version = 1
-  principal     = "111111111111"
-  action        = "lambda:GetLayerVersion"
-  statement_id  = "dev-account"
+  layer_name     = "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1"
+  version_number = 1
+  principal      = "111111111111"
+  action         = "lambda:GetLayerVersion"
+  statement_id   = "dev-account"
 }
 ```
 
 ## Argument Reference
 
-* `layer_arn` (Required) ARN of the Lambda Layer, which you want to grant access to.
-* `layer_version` (Required) Version of Lambda Layer, which you want to grant access to. Note: permissions only apply to a single version of a layer.
-* `principal` - (Required) AWS account ID which should be able to use your Lambda Layer. `*` can be used here, if you want to share your Lambda Layer widely.
-* `organization_id` - (Optional) An identifier of AWS Organization, which should be able to use your Lambda Layer. `principal` should be equal to `*` if `organization_id` provided.
+The following arguments are supported:
+
 * `action` - (Required) Action, which will be allowed. `lambda:GetLayerVersion` value is suggested by AWS documentation.
+* `layer_name` - (Required) The name or ARN of the Lambda Layer, which you want to grant access to.
+* `organization_id` - (Optional) An identifier of AWS Organization, which should be able to use your Lambda Layer. `principal` should be equal to `*` if `organization_id` is provided.
+* `principal` - (Required) AWS account ID which should be able to use your Lambda Layer. `*` can be used here, if you want to share your Lambda Layer widely.
 * `statement_id` - (Required) The name of Lambda Layer Permission, for example `dev-account` - a human-readable note about what this permission is for.
-
+* `version_number` - (Required) Version of Lambda Layer, which you want to grant access to. Note: permissions only apply to a single version of a layer.
 
 ## Attributes Reference
 
-* `layer_arn` - The Amazon Resource Name (ARN) of the Lambda Layer without version.
-* `layer_version` - The version of Lambda Layer.
-* `principal` - The principal which was granted access to your Lambda Layer.
-* `organization_id` - The AWS Organization which was granted access to your Lambda Layer.
-* `action` - Action, which is allowed to principal.
-* `statement_id` - Human readable name of Lambda Layer Permission.
-* `revision_id` - Identifier of Lambda Layer Permission.
-* `policy` - Full Lambda Layer Permission policy.
+In addition to all arguments above, the following attributes are exported:
 
-[1]: https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer
+* `id` - The `layer_name` and `version_number`, separated by a comma (`,`).
+* `revision_id` - A unique identifier for the current revision of the policy.
+* `policy` - Full Lambda Layer Permission policy.
 
 ## Import
 
-Lambda Layer Permissions can be imported using `arn`.
+Lambda Layer Permissions can be imported using `layer_name` and `version_number`, separated by a comma (`,`).
 
+```sh
+$ terraform import aws_lambda_layer_version_permission.example arn:aws:lambda:us-west-2:123456654321:layer:test_layer1,1
 ```
-$ terraform import \
-  aws_lambda_layer_version_permission.lambda_layer_permission \
-  arn:aws:lambda:_REGION_:_ACCOUNT_ID_:layer:_LAYER_NAME_:_LAYER_VERSION_
-```
+
+[1]: https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer
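A case the revised argument reference above describes but does not illustrate is an organization-wide grant, where `principal` must be `"*"` and the grant is scoped by `organization_id`. A minimal sketch, reusing the illustrative layer ARN from the docs example and the organization ID that appears in the acceptance tests:

```terraform
resource "aws_lambda_layer_version_permission" "org_wide" {
  layer_name      = "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1"
  version_number  = 1
  action          = "lambda:GetLayerVersion"
  statement_id    = "org-wide" # hypothetical statement name
  principal       = "*"        # must be "*" when organization_id is set
  organization_id = "o-0123456789"
}
```

This allows every account in the given organization to use version 1 of the layer, while accounts outside the organization are still denied access.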
From 5c3a280a9a063ad9fe02d3130cff7ed14c00814f Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Thu, 18 Nov 2021 16:16:40 -0500
Subject: [PATCH 297/304] Tidy up documentation.

---
 website/docs/r/iot_thing_group.html.markdown            | 8 ++++----
 website/docs/r/iot_thing_group_membership.html.markdown | 6 ++++++
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/website/docs/r/iot_thing_group.html.markdown b/website/docs/r/iot_thing_group.html.markdown
index bcb0d7115c59..57d1cc80ad86 100644
--- a/website/docs/r/iot_thing_group.html.markdown
+++ b/website/docs/r/iot_thing_group.html.markdown
@@ -12,7 +12,7 @@ Manages an AWS IoT Thing Group.
## Example Usage -```hcl +```terraform resource "aws_iot_thing_group" "parent" { name = "parent" } @@ -45,18 +45,18 @@ resource "aws_iot_thing_group" "example" { * `properties` - (Optional) The Thing Group properties. Defined below. * `tags` - (Optional) Key-value mapping of resource tags -## properties Reference +### properties Reference * `attribute_payload` - (Optional) The Thing Group attributes. Defined below. * `description` - (Optional) A description of the Thing Group. -## attribute_payload Reference +### attribute_payload Reference * `attributes` - (Optional) Key-value map. ## Attributes Reference -In addition to the arguments above, the following attributes are exported: +In addition to all arguments above, the following attributes are exported: * `arn` - The ARN of the Thing Group. * `id` - The Thing Group ID. diff --git a/website/docs/r/iot_thing_group_membership.html.markdown b/website/docs/r/iot_thing_group_membership.html.markdown index a188ee250c6e..039fad4c5c38 100644 --- a/website/docs/r/iot_thing_group_membership.html.markdown +++ b/website/docs/r/iot_thing_group_membership.html.markdown @@ -27,6 +27,12 @@ resource "aws_iot_thing_group_membership" "example" { * `thing_group_name` - (Required). The name of the group to which you are adding a thing. * `override_dynamic_group` - (Optional) Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The membership ID. + ## Import IoT Thing Group Membership can be imported using the thing group name and thing name. From e93c5ca71070cdf65fd550d88f97f9b4894aaa2c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 16:18:01 -0500 Subject: [PATCH 298/304] Fix typo --- website/docs/r/lambda_layer_version.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/lambda_layer_version.html.markdown b/website/docs/r/lambda_layer_version.html.markdown index cbf4342e68d2..64bd330f674d 100644 --- a/website/docs/r/lambda_layer_version.html.markdown +++ b/website/docs/r/lambda_layer_version.html.markdown @@ -38,7 +38,7 @@ For larger deployment packages it is recommended by Amazon to upload via S3, sin The following arguments are required: -* `layer_name` - (Required) A unique name for your Lambda Layer +* `layer_name` - (Required) Unique name for your Lambda Layer The following arguments are optional: From 2b937d8a93330ad39ddc51c4561c27ec30039a48 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Nov 2021 16:24:55 -0500 Subject: [PATCH 299/304] Prevent tfproviderdocs crash. --- website/docs/r/iot_thing_group_membership.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/iot_thing_group_membership.html.markdown b/website/docs/r/iot_thing_group_membership.html.markdown index 039fad4c5c38..c79d536dc798 100644 --- a/website/docs/r/iot_thing_group_membership.html.markdown +++ b/website/docs/r/iot_thing_group_membership.html.markdown @@ -23,8 +23,8 @@ resource "aws_iot_thing_group_membership" "example" { ## Argument Reference -* `thing_name` - (Required. The name of the thing to add to a group. -* `thing_group_name` - (Required). The name of the group to which you are adding a thing. 
+* `thing_name` - (Required) The name of the thing to add to a group. +* `thing_group_name` - (Required) The name of the group to which you are adding a thing. * `override_dynamic_group` - (Optional) Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. ## Attributes Reference From 9848f064e955dd5790b6a35465fc565c33395cb1 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 18 Nov 2021 16:27:25 -0500 Subject: [PATCH 300/304] Add GovCloud skip --- internal/service/lambda/function_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/lambda/function_test.go b/internal/service/lambda/function_test.go index f6b411d4c301..ad898535f5be 100644 --- a/internal/service/lambda/function_test.go +++ b/internal/service/lambda/function_test.go @@ -24,12 +24,12 @@ import ( func init() { acctest.RegisterServiceErrorCheckFunc(lambda.EndpointsID, testAccErrorCheckSkipLambda) - } func testAccErrorCheckSkipLambda(t *testing.T) resource.ErrorCheckFunc { return acctest.ErrorCheckSkipMessagesContaining(t, "InvalidParameterValueException: Unsupported source arn", + "InvalidParameterValueException: CompatibleArchitectures are not", ) } From 7af9627677f522ab00b05784e07ff394ce4c9a5c Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 18 Nov 2021 22:11:25 +0000 Subject: [PATCH 301/304] Update CHANGELOG.md (Manual Trigger) --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a574ca5f33f..96943f737f49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ FEATURES: * **New Data Source:** `aws_emr_release_labels` ([#21767](https://github.com/hashicorp/terraform-provider-aws/issues/21767)) * **New Resource:** `aws_appstream_directory_config` ([#21505](https://github.com/hashicorp/terraform-provider-aws/issues/21505)) +* **New Resource:** `aws_iot_thing_group` ([#21799](https://github.com/hashicorp/terraform-provider-aws/issues/21799)) +* **New Resource:** `aws_iot_thing_group_membership` ([#21799](https://github.com/hashicorp/terraform-provider-aws/issues/21799)) +* **New Resource:** `aws_lambda_layer_version_permission` ([#11941](https://github.com/hashicorp/terraform-provider-aws/issues/11941)) +* **New Resource:** `aws_s3_bucket_replication_configuration` ([#20777](https://github.com/hashicorp/terraform-provider-aws/issues/20777)) * **New Resource:** `aws_s3control_access_point_policy` ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) * **New Resource:** `aws_s3control_multi_region_access_point` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) * **New Resource:** `aws_s3control_multi_region_access_point_policy` ([#21060](https://github.com/hashicorp/terraform-provider-aws/issues/21060)) @@ -15,8 +19,10 @@ ENHANCEMENTS: * aws_s3_access_point: Add `alias` attribute ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) * aws_s3_access_point: Add `endpoints` attribute ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294)) +* data-source/aws_ec2_instance_type: Add `encryption_in_transit_supported` attribute ([#21837](https://github.com/hashicorp/terraform-provider-aws/issues/21837)) * resource/aws_emr_cluster: Add `auto_termination_policy` argument 
([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702))
* resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support resource tagging ([#21769](https://github.com/hashicorp/terraform-provider-aws/issues/21769))
+* resource/aws_kinesis_firehose_delivery_stream: Add `dynamic_partitioning_configuration` configuration block ([#20769](https://github.com/hashicorp/terraform-provider-aws/issues/20769))
* resource/aws_neptune_cluster: Support in-place update of `engine_version` ([#21760](https://github.com/hashicorp/terraform-provider-aws/issues/21760))
* resource/aws_route53_resolver_dnssec_config: Increase resource creation and deletion timeouts to 10 minutes ([#21797](https://github.com/hashicorp/terraform-provider-aws/issues/21797))
* resource/aws_sagemaker_endpoint: Add `deployment_config` argument ([#21765](https://github.com/hashicorp/terraform-provider-aws/issues/21765))

BUG FIXES:

* aws_s3_access_point: `vpc_configuration.vpc_id` is _ForceNew_ ([#19294](https://github.com/hashicorp/terraform-provider-aws/issues/19294))
+* data-source/aws_cloudfront_response_headers_policy: Correctly set `custom_headers_config` attribute ([#21838](https://github.com/hashicorp/terraform-provider-aws/issues/21838))
* resource/aws_autoscaling_group: Fix pending state in instance refresh ([#21777](https://github.com/hashicorp/terraform-provider-aws/issues/21777))
* resource/aws_cloudfront_cache_policy: Fix 0 values for `default_ttl`, `max_ttl` and `min_ttl` arguments ([#21793](https://github.com/hashicorp/terraform-provider-aws/issues/21793))
* resource/aws_internet_gateway: Allow `available` as a *pending* state during gateway detach ([#21794](https://github.com/hashicorp/terraform-provider-aws/issues/21794))
+* resource/aws_lambda_layer_version: Increase MaxItems for `compatible_runtimes` field to 15 ([#21825](https://github.com/hashicorp/terraform-provider-aws/issues/21825))
+* resource/aws_route: On route creation with a high custom creation timeout configured, the aws_route resource no longer gives up before the create timeout is exceeded (previously it gave up after 20 not-found checks) ([#21831](https://github.com/hashicorp/terraform-provider-aws/issues/21831))
* resource/aws_security_group: Fix lack of pagination when describing security groups ([#21743](https://github.com/hashicorp/terraform-provider-aws/issues/21743))

## 3.65.0 (November 11, 2021)

From 9f74fbd611832862d95ff66449fd37ff714ac862 Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Thu, 18 Nov 2021 17:11:31 -0500
Subject: [PATCH 302/304] docs/layer_version: Add warning

---
 website/docs/r/lambda_layer_version.html.markdown | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/website/docs/r/lambda_layer_version.html.markdown b/website/docs/r/lambda_layer_version.html.markdown
index 64bd330f674d..e57ab3c5ed7d 100644
--- a/website/docs/r/lambda_layer_version.html.markdown
+++ b/website/docs/r/lambda_layer_version.html.markdown
@@ -10,7 +10,9 @@ description: |-
 
 Provides a Lambda Layer Version resource. Lambda Layers allow you to reuse shared bits of code across multiple lambda functions.
 
-For information about Lambda Layers and how to use them, see [AWS Lambda Layers][1]
+For information about Lambda Layers and how to use them, see [AWS Lambda Layers][1].
+
+~> **NOTE:** Setting `skip_destroy` to `true` means that the AWS Provider will _not_ destroy any layer version, even when running `terraform destroy`.
Layer versions are thus intentional dangling resources that are _not_ managed by Terraform and may incur extra expense in your AWS account. ## Example Usage From b2bf2fee2aa9b8360adef89813e37befc832627a Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 18 Nov 2021 22:29:06 +0000 Subject: [PATCH 303/304] Update CHANGELOG.md (Manual Trigger) --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 96943f737f49..731828696b04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ ENHANCEMENTS: * resource/aws_emr_cluster: Add `auto_termination_policy` argument ([#21702](https://github.com/hashicorp/terraform-provider-aws/issues/21702)) * resource/aws_iot_thing_type: Add `tags` argument and `tags_all` attribute to support resource tagging ([#21769](https://github.com/hashicorp/terraform-provider-aws/issues/21769)) * resource/aws_kinesis_firehose_delivery_stream: Add `dynamic_partitioning_configuration` configuration block ([#20769](https://github.com/hashicorp/terraform-provider-aws/issues/20769)) +* resource/aws_lambda_layer_version: Add `skip_destroy` attribute ([#11997](https://github.com/hashicorp/terraform-provider-aws/issues/11997)) * resource/aws_neptune_cluster: Support in-place update of `engine_version` ([#21760](https://github.com/hashicorp/terraform-provider-aws/issues/21760)) * resource/aws_route53_resolver_dnssec_config: Increase resource creation and deletion timeouts to 10 minutes ([#21797](https://github.com/hashicorp/terraform-provider-aws/issues/21797)) * resource/aws_sagemaker_endpoint: Add `deployment_config` argument ([#21765](https://github.com/hashicorp/terraform-provider-aws/issues/21765)) From f5cacad05eaefd4358218e93e0b805c344da6f44 Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Thu, 18 Nov 2021 23:47:38 +0000 Subject: [PATCH 304/304] v3.66.0 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 731828696b04..d9206451cb81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 3.66.0 (Unreleased) +## 3.66.0 (November 18, 2021) FEATURES:
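Putting the series together: the sketch below shows how the resources added and amended above compose once v3.66.0 is released. It is illustrative only, not an excerpt from the patches — the layer name, archive filename, and account ID are placeholders, and it assumes the `retain` flag from the opening commit ships as the `skip_destroy` argument documented above.

```terraform
# Publish a layer version; keep superseded versions when this resource
# is updated or destroyed (behavior added in this series as skip_destroy).
resource "aws_lambda_layer_version" "example" {
  layer_name          = "example-layer"    # placeholder name
  filename            = "lambda_layer.zip" # placeholder archive
  compatible_runtimes = ["nodejs14.x"]

  skip_destroy = true
}

# Share a single layer version with another account, using the arguments
# documented in this series (layer_name, version_number, principal, ...).
resource "aws_lambda_layer_version_permission" "example" {
  layer_name     = aws_lambda_layer_version.example.layer_arn
  version_number = aws_lambda_layer_version.example.version
  principal      = "111111111111" # placeholder account ID
  action         = "lambda:GetLayerVersion"
  statement_id   = "dev-account"
}
```

If such a permission already exists out of band, it can be brought under management with the comma-separated import ID documented above (`<layer_name>,<version_number>`).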