azurerm_kubernetes_cluster & azurerm_kubernetes_cluster_node_pool: Support for `node_labels` #5531
mbfrahry committed Mar 10, 2020
2 parents 8696c81 + 80c7d5d commit 4c5ae77
Showing 22 changed files with 537 additions and 121 deletions.
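Taken together, the change wires a new `node_labels` map-of-strings argument through the default node pool schema, the standalone `azurerm_kubernetes_cluster_node_pool` resource, and the cluster data source. Below is a minimal configuration sketch of how the new argument would be used; the resource names, location references, and service-principal variables are illustrative assumptions, not part of this commit:

```hcl
# Illustrative sketch only; surrounding arguments are assumed, not taken from this commit.
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"

    # New in this commit: Kubernetes labels applied to nodes in the pool.
    node_labels = {
      "environment" = "staging"
    }
  }

  service_principal {
    client_id     = var.client_id
    client_secret = var.client_secret
  }
}

resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 1

  # The same argument is added to standalone node pools.
  node_labels = {
    "workload" = "batch"
  }
}
```

Note that the schema changes below mark `node_labels` as `ForceNew`, so changing the labels on an existing pool recreates it rather than updating it in place.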
2 changes: 1 addition & 1 deletion azurerm/internal/services/containers/client/client.go
@@ -3,7 +3,7 @@ package client
import (
"github.com/Azure/azure-sdk-for-go/services/containerinstance/mgmt/2018-10-01/containerinstance"
"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2018-09-01/containerregistry"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-11-01/containerservice"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common"
)

@@ -5,7 +5,7 @@ import (
"strings"
"time"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-11-01/containerservice"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/kubernetes"
@@ -169,6 +169,14 @@ func dataSourceArmKubernetesCluster() *schema.Resource {
Computed: true,
},

"node_labels": {
Type: schema.TypeMap,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},

"node_taints": {
Type: schema.TypeList,
Optional: true,
@@ -743,6 +751,10 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi
agentPoolProfile["max_pods"] = int(*profile.MaxPods)
}

if profile.NodeLabels != nil {
agentPoolProfile["node_labels"] = profile.NodeLabels
}

if profile.NodeTaints != nil {
agentPoolProfile["node_taints"] = *profile.NodeTaints
}
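On the data source side, the labels are only flattened into the computed `agent_pool_profile` attribute, so they can be read back but not set there. A short sketch of reading them out (resource and output names are illustrative):

```hcl
data "azurerm_kubernetes_cluster" "example" {
  name                = azurerm_kubernetes_cluster.example.name
  resource_group_name = azurerm_kubernetes_cluster.example.resource_group_name
}

# Each agent pool profile now exposes its labels as a computed map.
output "default_pool_node_labels" {
  value = data.azurerm_kubernetes_cluster.example.agent_pool_profile[0].node_labels
}
```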
2 changes: 1 addition & 1 deletion azurerm/internal/services/containers/kubernetes_addons.go
@@ -3,7 +3,7 @@ package containers
import (
"strings"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-11-01/containerservice"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
24 changes: 23 additions & 1 deletion azurerm/internal/services/containers/kubernetes_nodepool.go
@@ -3,7 +3,7 @@ package containers
import (
"fmt"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-11-01/containerservice"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
@@ -90,6 +90,15 @@ func SchemaDefaultNodePool() *schema.Schema {
ValidateFunc: validation.IntBetween(1, 100),
},

"node_labels": {
Type: schema.TypeMap,
ForceNew: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},

"node_taints": {
Type: schema.TypeList,
Optional: true,
@@ -135,6 +144,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
EnableNodePublicIP: defaultCluster.EnableNodePublicIP,
ScaleSetPriority: defaultCluster.ScaleSetPriority,
ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy,
NodeLabels: defaultCluster.NodeLabels,
NodeTaints: defaultCluster.NodeTaints,
},
}
@@ -145,13 +155,16 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC

raw := input[0].(map[string]interface{})
enableAutoScaling := raw["enable_auto_scaling"].(bool)
nodeLabelsRaw := raw["node_labels"].(map[string]interface{})
nodeLabels := utils.ExpandMapStringPtrString(nodeLabelsRaw)
nodeTaintsRaw := raw["node_taints"].([]interface{})
nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw)

profile := containerservice.ManagedClusterAgentPoolProfile{
EnableAutoScaling: utils.Bool(enableAutoScaling),
EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)),
Name: utils.String(raw["name"].(string)),
NodeLabels: nodeLabels,
NodeTaints: nodeTaints,
Type: containerservice.AgentPoolType(raw["type"].(string)),
VMSize: containerservice.VMSizeTypes(raw["vm_size"].(string)),
@@ -281,6 +294,14 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
name = *agentPool.Name
}

var nodeLabels map[string]string
if agentPool.NodeLabels != nil {
nodeLabels = make(map[string]string)
for k, v := range agentPool.NodeLabels {
nodeLabels[k] = *v
}
}

var nodeTaints []string
if agentPool.NodeTaints != nil {
nodeTaints = *agentPool.NodeTaints
@@ -306,6 +327,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
"min_count": minCount,
"name": name,
"node_count": count,
"node_labels": nodeLabels,
"node_taints": nodeTaints,
"os_disk_size_gb": osDiskSizeGB,
"type": string(agentPool.Type),
@@ -6,7 +6,7 @@ import (
"strings"
"time"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-11-01/containerservice"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
@@ -5,7 +5,7 @@ import (
"log"
"time"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-10-01/containerservice"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-11-01/containerservice"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
@@ -99,6 +99,15 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource {
ValidateFunc: validation.IntBetween(1, 100),
},

"node_labels": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},

"node_taints": {
Type: schema.TypeList,
Optional: true,
@@ -213,6 +222,11 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int
profile.MaxPods = utils.Int32(maxPods)
}

nodeLabelsRaw := d.Get("node_labels").(map[string]interface{})
if nodeLabels := utils.ExpandMapStringPtrString(nodeLabelsRaw); len(nodeLabels) > 0 {
profile.NodeLabels = nodeLabels
}

nodeTaintsRaw := d.Get("node_taints").([]interface{})
if nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw); len(*nodeTaints) > 0 {
profile.NodeTaints = nodeTaints
@@ -460,6 +474,10 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter
}
d.Set("node_count", count)

if err := d.Set("node_labels", props.NodeLabels); err != nil {
return fmt.Errorf("Error setting `node_labels`: %+v", err)
}

if err := d.Set("node_taints", utils.FlattenStringSlice(props.NodeTaints)); err != nil {
return fmt.Errorf("Error setting `node_taints`: %+v", err)
}
@@ -596,6 +596,33 @@ func testAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones(
})
}

func TestAccDataSourceAzureRMKubernetesCluster_nodeLabels(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccDataSourceAzureRMKubernetesCluster_nodeLabels(t)
}

func testAccDataSourceAzureRMKubernetesCluster_nodeLabels(t *testing.T) {
data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test")
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
labels := map[string]string{"key": "value"}

resource.Test(t, resource.TestCase{
PreCheck: func() { acceptance.PreCheck(t) },
Providers: acceptance.SupportedProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccDataSourceAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(data.ResourceName),
resource.TestCheckResourceAttr(data.ResourceName, "agent_pool_profile.0.node_labels.key", "value"),
),
},
},
})
}

func TestAccDataSourceAzureRMKubernetesCluster_nodeTaints(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccDataSourceAzureRMKubernetesCluster_nodeTaints(t)
@@ -864,6 +891,18 @@ data "azurerm_kubernetes_cluster" "test" {
`, r)
}

func testAccDataSourceAzureRMKubernetesCluster_nodeLabelsConfig(data acceptance.TestData, clientId string, clientSecret string, labels map[string]string) string {
r := testAccAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels)
return fmt.Sprintf(`
%s
data "azurerm_kubernetes_cluster" "test" {
name = "${azurerm_kubernetes_cluster.test.name}"
resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}"
}
`, r)
}

func testAccDataSourceAzureRMKubernetesCluster_nodeTaintsConfig(data acceptance.TestData, clientId string, clientSecret string) string {
r := testAccAzureRMKubernetesCluster_nodeTaintsConfig(data, clientId, clientSecret)
return fmt.Sprintf(`
@@ -4,7 +4,9 @@ import (
"fmt"
"net/http"
"os"
"reflect"
"regexp"
"strings"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
@@ -372,6 +374,46 @@ func testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(t *testing.T) {
})
}

func TestAccAzureRMKubernetesClusterNodePool_nodeLabels(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesClusterNodePool_nodeLabels(t)
}

func testAccAzureRMKubernetesClusterNodePool_nodeLabels(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
labels1 := map[string]string{"key": "value"}
labels2 := map[string]string{"key2": "value2"}
labels3 := map[string]string{}

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acceptance.PreCheck(t) },
Providers: acceptance.SupportedProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data, clientId, clientSecret, labels1),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesNodePoolNodeLabels(data.ResourceName, labels1),
),
},
{
Config: testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data, clientId, clientSecret, labels2),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesNodePoolNodeLabels(data.ResourceName, labels2),
),
},
{
Config: testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data, clientId, clientSecret, labels3),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesNodePoolNodeLabels(data.ResourceName, labels3),
),
},
},
})
}

func TestAccAzureRMKubernetesClusterNodePool_nodePublicIP(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesClusterNodePool_nodePublicIP(t)
@@ -659,6 +701,45 @@ func testCheckAzureRMKubernetesNodePoolExists(resourceName string) resource.Test
}
}

func testCheckAzureRMKubernetesNodePoolNodeLabels(resourceName string, expectedLabels map[string]string) resource.TestCheckFunc {
return func(s *terraform.State) error {
client := acceptance.AzureProvider.Meta().(*clients.Client).Containers.AgentPoolsClient
ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext

// Ensure we have enough information in state to look up in API
rs, ok := s.RootModule().Resources[resourceName]
if !ok {
return fmt.Errorf("Not found: %s", resourceName)
}

name := rs.Primary.Attributes["name"]
kubernetesClusterId := rs.Primary.Attributes["kubernetes_cluster_id"]
parsedK8sId, err := containers.ParseKubernetesClusterID(kubernetesClusterId)
if err != nil {
return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err)
}

agent_pool, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name)
if err != nil {
return fmt.Errorf("Bad: Get on kubernetesClustersClient: %+v", err)
}

if agent_pool.StatusCode == http.StatusNotFound {
return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) does not exist", name, parsedK8sId.Name, parsedK8sId.ResourceGroup)
}

labels := make(map[string]string)
for k, v := range agent_pool.NodeLabels {
labels[k] = *v
}
if !reflect.DeepEqual(labels, expectedLabels) {
return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) nodeLabels %v do not match expected %v", name, parsedK8sId.Name, parsedK8sId.ResourceGroup, labels, expectedLabels)
}

return nil
}
}

func testAccAzureRMKubernetesClusterNodePool_autoScaleConfig(data acceptance.TestData, clientId, clientSecret string) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data, clientId, clientSecret)
return fmt.Sprintf(`
@@ -897,6 +978,28 @@ resource "azurerm_kubernetes_cluster_node_pool" "manual" {
`, template, numberOfAgents)
}

func testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data acceptance.TestData, clientId, clientSecret string, labels map[string]string) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data, clientId, clientSecret)
labelsSlice := make([]string, 0, len(labels))
for k, v := range labels {
labelsSlice = append(labelsSlice, fmt.Sprintf(" \"%s\" = \"%s\"", k, v))
}
labelsStr := strings.Join(labelsSlice, "\n")
return fmt.Sprintf(`
%s
resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_DS2_v2"
node_count = 1
node_labels = {
%s
}
}
`, template, labelsStr)
}

func testAccAzureRMKubernetesClusterNodePool_nodePublicIPConfig(data acceptance.TestData, clientId, clientSecret string) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data, clientId, clientSecret)
return fmt.Sprintf(`