diff --git a/.changelog/38002.txt b/.changelog/38002.txt new file mode 100644 index 000000000000..0c0ec580e79e --- /dev/null +++ b/.changelog/38002.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_elasticache_replication_group: Add `cluster_mode` argument +``` + +```release-note:enhancement +data-source/aws_elasticache_replication_group: Add `cluster_mode` attribute +``` \ No newline at end of file diff --git a/internal/service/elasticache/replication_group.go b/internal/service/elasticache/replication_group.go index 2480dea7fb4a..bd0910baae92 100644 --- a/internal/service/elasticache/replication_group.go +++ b/internal/service/elasticache/replication_group.go @@ -93,6 +93,12 @@ func resourceReplicationGroup() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "cluster_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.ClusterMode](), + }, "configuration_endpoint_address": { Type: schema.TypeString, Computed: true, @@ -422,6 +428,10 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } } + if v, ok := d.GetOk("cluster_mode"); ok { + input.ClusterMode = awstypes.ClusterMode(v.(string)) + } + if v, ok := d.GetOk("data_tiering_enabled"); ok { input.DataTieringEnabled = aws.Bool(v.(bool)) } @@ -651,6 +661,7 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m d.Set("replicas_per_node_group", len(rgp.NodeGroups[0].NodeGroupMembers)-1) d.Set("cluster_enabled", rgp.ClusterEnabled) + d.Set("cluster_mode", rgp.ClusterMode) d.Set("replication_group_id", rgp.ReplicationGroupId) d.Set(names.AttrARN, rgp.ARN) d.Set("data_tiering_enabled", rgp.DataTiering == awstypes.DataTieringStatusEnabled) @@ -774,6 +785,11 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, requestUpdate = true } + if d.HasChange("cluster_mode") { + input.ClusterMode = awstypes.ClusterMode(d.Get("cluster_mode").(string)) + 
requestUpdate = true + } + if d.HasChange(names.AttrEngineVersion) { input.EngineVersion = aws.String(d.Get(names.AttrEngineVersion).(string)) requestUpdate = true diff --git a/internal/service/elasticache/replication_group_data_source.go b/internal/service/elasticache/replication_group_data_source.go index f582054f0f2e..895fd6b92e01 100644 --- a/internal/service/elasticache/replication_group_data_source.go +++ b/internal/service/elasticache/replication_group_data_source.go @@ -35,6 +35,10 @@ func dataSourceReplicationGroup() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "cluster_mode": { + Type: schema.TypeString, + Computed: true, + }, "configuration_endpoint_address": { Type: schema.TypeString, Computed: true, @@ -125,9 +129,7 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) - groupID := d.Get("replication_group_id").(string) - - rg, err := findReplicationGroupByID(ctx, conn, groupID) + rg, err := findReplicationGroupByID(ctx, conn, d.Get("replication_group_id").(string)) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Replication Group", err)) @@ -174,6 +176,7 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, d.Set("node_type", rg.CacheNodeType) d.Set("num_node_groups", len(rg.NodeGroups)) d.Set("replicas_per_node_group", len(rg.NodeGroups[0].NodeGroupMembers)-1) + d.Set("cluster_mode", rg.ClusterMode) d.Set("log_delivery_configuration", flattenLogDeliveryConfigurations(rg.LogDeliveryConfigurations)) d.Set("snapshot_window", rg.SnapshotWindow) d.Set("snapshot_retention_limit", rg.SnapshotRetentionLimit) diff --git a/internal/service/elasticache/replication_group_data_source_test.go b/internal/service/elasticache/replication_group_data_source_test.go index 6a4d98e0243e..8ed72bcd723e 100644 --- 
a/internal/service/elasticache/replication_group_data_source_test.go +++ b/internal/service/elasticache/replication_group_data_source_test.go @@ -35,6 +35,7 @@ func TestAccElastiCacheReplicationGroupDataSource_basic(t *testing.T) { resource.TestCheckResourceAttr(dataSourceName, "auth_token_enabled", acctest.CtFalse), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(dataSourceName, "automatic_failover_enabled", resourceName, "automatic_failover_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, "cluster_mode", resourceName, "cluster_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "multi_az_enabled", resourceName, "multi_az_enabled"), resource.TestCheckResourceAttrPair(dataSourceName, "member_clusters.#", resourceName, "member_clusters.#"), resource.TestCheckResourceAttrPair(dataSourceName, "node_type", resourceName, "node_type"), diff --git a/internal/service/elasticache/replication_group_test.go b/internal/service/elasticache/replication_group_test.go index 14c1d1995600..d0e66f53770c 100644 --- a/internal/service/elasticache/replication_group_test.go +++ b/internal/service/elasticache/replication_group_test.go @@ -1314,6 +1314,62 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_singleNode(t *testing.T) { }) } +func TestAccElastiCacheReplicationGroup_ClusterMode_updateFromDisabled_Compatible_Enabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var rg awstypes.ReplicationGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElastiCacheServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationGroupDestroy(ctx), + 
Steps: []resource.TestStep{ + { + Config: testAccReplicationGroup_ClusterMode_updateFromDisabled_Compatible_Enabled(rName, "disabled", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationGroupExists(ctx, resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "cluster_mode", "disabled"), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replicas_per_node_group", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "num_cache_clusters", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", acctest.Ct2), + ), + }, + { + Config: testAccReplicationGroup_ClusterMode_updateFromDisabled_Compatible_Enabled(rName, "compatible", true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationGroupExists(ctx, resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "cluster_mode", "compatible"), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replicas_per_node_group", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "num_cache_clusters", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", acctest.Ct2), + ), + }, + { + Config: testAccReplicationGroup_ClusterMode_updateFromDisabled_Compatible_Enabled(rName, names.AttrEnabled, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationGroupExists(ctx, resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "cluster_mode", names.AttrEnabled), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", acctest.Ct1), + 
resource.TestCheckResourceAttr(resourceName, "replicas_per_node_group", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "num_cache_clusters", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", acctest.Ct2), + ), + }, + }, + }) +} + func TestAccElastiCacheReplicationGroup_clusteringAndCacheNodesCausesError(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -3552,6 +3608,30 @@ resource "aws_elasticache_replication_group" "test" { ) } +func testAccReplicationGroup_ClusterMode_updateFromDisabled_Compatible_Enabled(rName, clusterMode string, enableClusterMode bool) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + description = "test description" + node_type = "cache.t2.medium" + apply_immediately = true + automatic_failover_enabled = true + cluster_mode = %[2]q + engine_version = "7.1" + parameter_group_name = tobool("%[3]t") ? "default.redis7.cluster.on" : "default.redis7" + num_node_groups = 1 + replicas_per_node_group = 1 + timeouts { + create = "60m" + update = "60m" + } +} +`, rName, clusterMode, enableClusterMode), + ) +} + func testAccReplicationGroupConfig_useCMKKMSKeyID(rName string) string { return acctest.ConfigCompose( acctest.ConfigVPCWithSubnets(rName, 1), diff --git a/website/docs/d/elasticache_replication_group.html.markdown b/website/docs/d/elasticache_replication_group.html.markdown index 5675f0e80240..3cb56d5832f0 100644 --- a/website/docs/d/elasticache_replication_group.html.markdown +++ b/website/docs/d/elasticache_replication_group.html.markdown @@ -32,6 +32,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the created ElastiCache Replication Group. * `auth_token_enabled` - Whether an AuthToken (password) is enabled. 
* `automatic_failover_enabled` - A flag whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. +* `cluster_mode` - Whether cluster mode is `enabled`, `disabled`, or `compatible`. * `node_type` – The cluster node type. * `num_cache_clusters` – The number of cache clusters that the replication group has. * `num_node_groups` - Number of node groups (shards) for the replication group. diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 0f2ed4d3c8d4..9f7f4a538bef 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -201,6 +201,7 @@ The following arguments are optional: Only supported for engine type `"redis"` and if the engine version is 6 or higher. Defaults to `true`. * `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If enabled, `num_cache_clusters` must be greater than 1. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`. +* `cluster_mode` - (Optional) Specifies whether cluster mode is enabled or disabled. Valid values are `enabled`, `disabled`, or `compatible`. * `data_tiering_enabled` - (Optional) Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to `true` when using r6gd nodes. * `engine` - (Optional) Name of the cache engine to be used for the clusters in this replication group. The only valid value is `redis`. * `engine_version` - (Optional) Version number of the cache engine to be used for the cache clusters in this replication group.