Merge pull request #31008 from hashicorp/d-elasticache-rep-group-remove-azs

elasticache/rep_group: Remove availability zones and other deprecated parameters
YakDriver committed May 2, 2023
2 parents 56003fe + 99bbc19 commit 24c4ed9
Showing 7 changed files with 447 additions and 796 deletions.
35 changes: 35 additions & 0 deletions .changelog/31008.txt
@@ -0,0 +1,35 @@
```release-note:breaking-change
resource/aws_elasticache_replication_group: Remove the `availability_zones`, `number_cache_clusters`, and `replication_group_description` arguments -- use `preferred_cache_cluster_azs`, `num_cache_clusters`, and `description`, respectively, instead
```

```release-note:breaking-change
resource/aws_elasticache_replication_group: Remove `cluster_mode` configuration block -- use top-level `num_node_groups` and `replicas_per_node_group` instead
```

```release-note:note
resource/aws_elasticache_replication_group: Update configurations to use `preferred_cache_cluster_azs` instead of the `availability_zones` argument
```
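
A minimal before/after sketch of this migration (the group name, node type, AZ names, and cluster count below are placeholders). Note that `preferred_cache_cluster_azs` is an ordered list, whereas the removed `availability_zones` argument was an unordered set:

```hcl
# Before (no longer valid in this release):
#
#   availability_zones = ["us-west-2a", "us-west-2b"]

resource "aws_elasticache_replication_group" "example" {
  replication_group_id = "example"
  description          = "example replication group"
  node_type            = "cache.t4g.micro"
  num_cache_clusters   = 2

  # After: the same AZs, now as an ordered list.
  preferred_cache_cluster_azs = ["us-west-2a", "us-west-2b"]
}
```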

```release-note:note
resource/aws_elasticache_replication_group: Update configurations to use `num_cache_clusters` instead of the `number_cache_clusters` argument
```

```release-note:note
resource/aws_elasticache_replication_group: Update configurations to use `description` instead of the `replication_group_description` argument
```
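
The two renames above are mechanical; the argument semantics are unchanged. A sketch with placeholder values:

```hcl
# Before (no longer valid in this release):
#
#   replication_group_description = "example replication group"
#   number_cache_clusters         = 2

resource "aws_elasticache_replication_group" "example" {
  replication_group_id = "example"
  node_type            = "cache.t4g.micro"

  # After: straight renames of the removed arguments.
  description        = "example replication group"
  num_cache_clusters = 2
}
```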

```release-note:note
resource/aws_elasticache_replication_group: Update configurations to use top-level `num_node_groups` and `replicas_per_node_group` instead of `cluster_mode.0.num_node_groups` and `cluster_mode.0.replicas_per_node_group`, respectively
```
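
A sketch of the `cluster_mode` migration (the group name, node type, and cluster-mode-enabled parameter group are placeholders); the nested values move unchanged to the top level:

```hcl
# Before (no longer valid in this release):
#
#   cluster_mode {
#     num_node_groups         = 2
#     replicas_per_node_group = 1
#   }

resource "aws_elasticache_replication_group" "example" {
  replication_group_id = "example"
  description          = "example replication group"
  node_type            = "cache.t4g.micro"
  parameter_group_name = "default.redis7.cluster.on" # placeholder

  # After: the same values as top-level arguments.
  num_node_groups         = 2
  replicas_per_node_group = 1
}
```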

```release-note:breaking-change
data-source/aws_elasticache_replication_group: Remove the `number_cache_clusters` and `replication_group_description` arguments -- use `num_cache_clusters` and `description`, respectively, instead
```

```release-note:note
data-source/aws_elasticache_replication_group: Update configurations to use `num_cache_clusters` instead of the `number_cache_clusters` argument
```

```release-note:note
data-source/aws_elasticache_replication_group: Update configurations to use `description` instead of the `replication_group_description` argument
```
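
For the data source, only references to the renamed attributes need to change; a sketch, assuming an existing group named `example`:

```hcl
data "aws_elasticache_replication_group" "example" {
  replication_group_id = "example"
}

# After: read `description` and `num_cache_clusters` instead of the
# removed `replication_group_description` and `number_cache_clusters`.
output "description" {
  value = data.aws_elasticache_replication_group.example.description
}

output "num_cache_clusters" {
  value = data.aws_elasticache_replication_group.example.num_cache_clusters
}
```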
141 changes: 9 additions & 132 deletions internal/service/elasticache/replication_group.go
@@ -74,44 +74,10 @@ func ResourceReplicationGroup() *schema.Resource {
Optional: true,
Default: false,
},
"availability_zones": {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
ConflictsWith: []string{"preferred_cache_cluster_azs"},
},
"cluster_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"cluster_mode": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
ConflictsWith: []string{"num_node_groups", "replicas_per_node_group"},
Deprecated: "Use num_node_groups and replicas_per_node_group instead",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"num_node_groups": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ConflictsWith: []string{"num_node_groups", "number_cache_clusters", "num_cache_clusters", "global_replication_group_id"},
Deprecated: "Use root-level num_node_groups instead",
},
"replicas_per_node_group": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ConflictsWith: []string{"replicas_per_node_group"},
Deprecated: "Use root-level replicas_per_node_group instead",
},
},
},
},
"configuration_endpoint_address": {
Type: schema.TypeString,
Computed: true,
@@ -126,7 +92,6 @@ func ResourceReplicationGroup() *schema.Resource {
Type: schema.TypeString,
Optional: true,
Computed: true,
ExactlyOneOf: []string{"description", "replication_group_description"},
ValidateFunc: validation.StringIsNotEmpty,
},
"engine": {
@@ -152,7 +117,6 @@ func ResourceReplicationGroup() *schema.Resource {
ForceNew: true,
Computed: true,
ConflictsWith: []string{
"cluster_mode.0.num_node_groups",
"num_node_groups",
"parameter_group_name",
"engine",
@@ -228,20 +192,13 @@ func ResourceReplicationGroup() *schema.Resource {
Type: schema.TypeInt,
Computed: true,
Optional: true,
ConflictsWith: []string{"cluster_mode.0.num_node_groups", "num_node_groups", "number_cache_clusters"},
ConflictsWith: []string{"num_node_groups"},
},
"num_node_groups": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ConflictsWith: []string{"cluster_mode", "number_cache_clusters", "num_cache_clusters", "global_replication_group_id"},
},
"number_cache_clusters": {
Type: schema.TypeInt,
Computed: true,
Optional: true,
ConflictsWith: []string{"cluster_mode.0.num_node_groups", "num_cache_clusters", "num_node_groups"},
Deprecated: "Use num_cache_clusters instead",
ConflictsWith: []string{"num_cache_clusters", "global_replication_group_id"},
},
"parameter_group_name": {
Type: schema.TypeString,
@@ -264,10 +221,9 @@ func ResourceReplicationGroup() *schema.Resource {
},
},
"preferred_cache_cluster_azs": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
ConflictsWith: []string{"availability_zones"},
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"primary_endpoint_address": {
Type: schema.TypeString,
@@ -278,18 +234,9 @@ func ResourceReplicationGroup() *schema.Resource {
Computed: true,
},
"replicas_per_node_group": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ConflictsWith: []string{"cluster_mode"},
},
"replication_group_description": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ExactlyOneOf: []string{"description", "replication_group_description"},
Deprecated: "Use description instead",
ValidateFunc: validation.StringIsNotEmpty,
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"replication_group_id": {
Type: schema.TypeString,
@@ -395,10 +342,7 @@ func ResourceReplicationGroup() *schema.Resource {
CustomizeDiffValidateReplicationGroupAutomaticFailover,
customizeDiffEngineVersionForceNewOnDowngrade,
customdiff.ComputedIf("member_clusters", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool {
return diff.HasChange("number_cache_clusters") ||
diff.HasChange("num_cache_clusters") ||
diff.HasChange("cluster_mode.0.num_node_groups") ||
diff.HasChange("cluster_mode.0.replicas_per_node_group") ||
return diff.HasChange("num_cache_clusters") ||
diff.HasChange("num_node_groups") ||
diff.HasChange("replicas_per_node_group")
}),
@@ -419,9 +363,6 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData,
if v, ok := d.GetOk("description"); ok {
input.ReplicationGroupDescription = aws.String(v.(string))
}
if v, ok := d.GetOk("replication_group_description"); ok {
input.ReplicationGroupDescription = aws.String(v.(string))
}

if v, ok := d.GetOk("data_tiering_enabled"); ok {
input.DataTieringEnabled = aws.Bool(v.(bool))
@@ -453,9 +394,6 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData,
if preferredAZs, ok := d.GetOk("preferred_cache_cluster_azs"); ok {
input.PreferredCacheClusterAZs = flex.ExpandStringList(preferredAZs.([]interface{}))
}
if availabilityZones := d.Get("availability_zones").(*schema.Set); availabilityZones.Len() > 0 {
input.PreferredCacheClusterAZs = flex.ExpandStringSet(availabilityZones)
}

if v, ok := d.GetOk("parameter_group_name"); ok {
input.CacheParameterGroupName = aws.String(v.(string))
@@ -530,19 +468,6 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData,
input.AuthToken = aws.String(v.(string))
}

if clusterMode, ok := d.GetOk("cluster_mode"); ok {
clusterModeList := clusterMode.([]interface{})
attributes := clusterModeList[0].(map[string]interface{})

if v, ok := attributes["num_node_groups"]; ok && v != 0 {
input.NumNodeGroups = aws.Int64(int64(v.(int)))
}

if v, ok := attributes["replicas_per_node_group"]; ok {
input.ReplicasPerNodeGroup = aws.Int64(int64(v.(int)))
}
}

if v, ok := d.GetOk("num_node_groups"); ok && v != 0 {
input.NumNodeGroups = aws.Int64(int64(v.(int)))
}
@@ -551,10 +476,6 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData,
input.ReplicasPerNodeGroup = aws.Int64(int64(v.(int)))
}

if cacheClusters, ok := d.GetOk("number_cache_clusters"); ok {
input.NumCacheClusters = aws.Int64(int64(cacheClusters.(int)))
}

if numCacheClusters, ok := d.GetOk("num_cache_clusters"); ok {
input.NumCacheClusters = aws.Int64(int64(numCacheClusters.(int)))
}
@@ -658,15 +579,10 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m

d.Set("kms_key_id", rgp.KmsKeyId)
d.Set("description", rgp.Description)
d.Set("replication_group_description", rgp.Description)
d.Set("number_cache_clusters", len(rgp.MemberClusters))
d.Set("num_cache_clusters", len(rgp.MemberClusters))
if err := d.Set("member_clusters", flex.FlattenStringSet(rgp.MemberClusters)); err != nil {
return sdkdiag.AppendErrorf(diags, "setting member_clusters: %s", err)
}
if err := d.Set("cluster_mode", flattenNodeGroupsToClusterMode(rgp.NodeGroups)); err != nil {
return sdkdiag.AppendErrorf(diags, "setting cluster_mode attribute: %s", err)
}

d.Set("num_node_groups", len(rgp.NodeGroups))
d.Set("replicas_per_node_group", len(rgp.NodeGroups[0].NodeGroupMembers)-1)
@@ -747,21 +663,13 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData,
conn := meta.(*conns.AWSClient).ElastiCacheConn()

if d.HasChanges(
"cluster_mode.0.num_node_groups",
"cluster_mode.0.replicas_per_node_group",
"num_node_groups",
"replicas_per_node_group",
) {
err := modifyReplicationGroupShardConfiguration(ctx, conn, d)
if err != nil {
return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s) shard configuration: %s", d.Id(), err)
}
} else if d.HasChange("number_cache_clusters") {
// TODO: remove when number_cache_clusters is removed from resource schema
err := modifyReplicationGroupNumCacheClusters(ctx, conn, d, "number_cache_clusters")
if err != nil {
return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s) clusters: %s", d.Id(), err)
}
} else if d.HasChange("num_cache_clusters") {
err := modifyReplicationGroupNumCacheClusters(ctx, conn, d, "num_cache_clusters")
if err != nil {
@@ -780,11 +688,6 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData,
requestUpdate = true
}

if d.HasChange("replication_group_description") {
params.ReplicationGroupDescription = aws.String(d.Get("replication_group_description").(string))
requestUpdate = true
}

if d.HasChange("automatic_failover_enabled") {
params.AutomaticFailoverEnabled = aws.Bool(d.Get("automatic_failover_enabled").(bool))
requestUpdate = true
@@ -1051,33 +954,7 @@ func deleteReplicationGroup(ctx context.Context, replicationGroupID string, conn
return err
}

func flattenNodeGroupsToClusterMode(nodeGroups []*elasticache.NodeGroup) []map[string]interface{} {
if len(nodeGroups) == 0 {
return []map[string]interface{}{}
}

m := map[string]interface{}{
"num_node_groups": len(nodeGroups),
"replicas_per_node_group": (len(nodeGroups[0].NodeGroupMembers) - 1),
}
return []map[string]interface{}{m}
}

func modifyReplicationGroupShardConfiguration(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData) error {
if d.HasChange("cluster_mode.0.num_node_groups") {
err := modifyReplicationGroupShardConfigurationNumNodeGroups(ctx, conn, d, "cluster_mode.0.num_node_groups")
if err != nil {
return err
}
}

if d.HasChange("cluster_mode.0.replicas_per_node_group") {
err := modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx, conn, d, "cluster_mode.0.replicas_per_node_group")
if err != nil {
return err
}
}

if d.HasChange("num_node_groups") {
err := modifyReplicationGroupShardConfigurationNumNodeGroups(ctx, conn, d, "num_node_groups")
if err != nil {
12 changes: 0 additions & 12 deletions internal/service/elasticache/replication_group_data_source.go
@@ -24,11 +24,6 @@ func DataSourceReplicationGroup() *schema.Resource {
Required: true,
ValidateFunc: validateReplicationGroupID,
},
"replication_group_description": {
Type: schema.TypeString,
Computed: true,
Deprecated: "Use description instead",
},
"arn": {
Type: schema.TypeString,
Computed: true,
@@ -69,11 +64,6 @@ func DataSourceReplicationGroup() *schema.Resource {
Type: schema.TypeInt,
Computed: true,
},
"number_cache_clusters": {
Type: schema.TypeInt,
Computed: true,
Deprecated: "Use num_cache_clusters instead",
},
"member_clusters": {
Type: schema.TypeSet,
Computed: true,
@@ -140,7 +130,6 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData,

d.SetId(aws.StringValue(rg.ReplicationGroupId))
d.Set("description", rg.Description)
d.Set("replication_group_description", rg.Description)
d.Set("arn", rg.ARN)
d.Set("auth_token_enabled", rg.AuthTokenEnabled)

@@ -178,7 +167,6 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData,
}

d.Set("num_cache_clusters", len(rg.MemberClusters))
d.Set("number_cache_clusters", len(rg.MemberClusters))
if err := d.Set("member_clusters", flex.FlattenStringList(rg.MemberClusters)); err != nil {
return sdkdiag.AppendErrorf(diags, "setting member_clusters: %s", err)
}
(4 remaining changed files not shown.)
