diff --git a/Makefile b/Makefile index d5c7767e8..7a97e57f1 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,7 @@ DEFAULT_CHANNEL=alpha GOARCH?=amd64 PLATFORM=$(shell uname -s)-$(shell uname -m) ROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) +KUSTOMIZE_CONFIG?=config/default # Default bundle image tag BUNDLE_IMG ?= controller-bundle:$(VERSION) @@ -129,11 +130,11 @@ uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from a cluster .PHONY: deploy deploy: manifests $(KUSTOMIZE) ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config cd config/manager && $(ROOT)/$(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default | kubectl apply --force-conflicts --server-side -f - + $(KUSTOMIZE) build $(KUSTOMIZE_CONFIG) | kubectl apply --force-conflicts --server-side -f - .PHONY: undeploy undeploy: $(KUSTOMIZE) ## Undeploy controller from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/default | kubectl delete -f - + $(KUSTOMIZE) build $(KUSTOMIZE_CONFIG) | kubectl delete -f - .PHONY: manifests manifests: generate-manifests patch-crds ## Generate manifests e.g. CRD, RBAC etc. diff --git a/apis/datadoghq/common/const.go b/apis/datadoghq/common/const.go index ffcdc0621..89df33a72 100644 --- a/apis/datadoghq/common/const.go +++ b/apis/datadoghq/common/const.go @@ -81,6 +81,7 @@ const ( // Default Image name DefaultAgentImageName string = "agent" DefaultClusterAgentImageName string = "cluster-agent" + DefaultImageRegistry string = "gcr.io/datadoghq" // ExtendedDaemonset defaulting DefaultRollingUpdateMaxUnavailable = "10%" diff --git a/apis/datadoghq/common/envvar.go b/apis/datadoghq/common/envvar.go index 357c20ccc..019a5df97 100644 --- a/apis/datadoghq/common/envvar.go +++ b/apis/datadoghq/common/envvar.go @@ -7,43 +7,97 @@ package common // Datadog env var names const ( - DDIgnoreAutoConf = "DD_IGNORE_AUTOCONF" - DDKubeStateMetricsCoreEnabled = "DD_KUBE_STATE_METRICS_CORE_ENABLED" - DDKubeStateMetricsCoreConfigMap = "DD_KUBE_STATE_METRICS_CORE_CONFIGMAP_NAME" - DDProcessAgentEnabled = "DD_PROCESS_AGENT_ENABLED" - DDAPMEnabled = "DD_APM_ENABLED" - DDSystemProbeNPMEnabledEnvVar = "DD_SYSTEM_PROBE_NETWORK_ENABLED" - DDSystemProbeEnabledEnvVar = "DD_SYSTEM_PROBE_ENABLED" - DDSystemProbeExternal = "DD_SYSTEM_PROBE_EXTERNAL" - DDSystemProbeServiceMonitoringEnabled = "DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED" - DDSystemProbeSocket = "DD_SYSPROBE_SOCKET" - DDComplianceEnabled = "DD_COMPLIANCE_CONFIG_ENABLED" - DDComplianceCheckInterval = "DD_COMPLIANCE_CONFIG_CHECK_INTERVAL" - DDHostRootEnvVar = "HOST_ROOT" - DDEnableOOMKillEnvVar = "DD_SYSTEM_PROBE_CONFIG_ENABLE_OOM_KILL" - DDEnableTCPQueueLengthEnvVar = "DD_SYSTEM_PROBE_CONFIG_ENABLE_TCP_QUEUE_LENGTH" - DDLeaderElection = "DD_LEADER_ELECTION" - DDClusterAgentKubeServiceName = "DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME" - DDHealthPort = "DD_HEALTH_PORT" - DDLogsEnabled = "DD_LOGS_ENABLED" - DDLogsConfigContainerCollectAll = "DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL" - DDLogsContainerCollectUsingFiles = "DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE" - DDLogsConfigOpenFilesLimit = "DD_LOGS_CONFIG_OPEN_FILES_LIMIT" - DDPrometheusScrapeEnabled = "DD_PROMETHEUS_SCRAPE_ENABLED" - DDPrometheusScrapeServiceEndpoints = "DD_PROMETHEUS_SCRAPE_SERVICE_ENDPOINTS" - DDPrometheusScrapeChecks = "DD_PROMETHEUS_SCRAPE_CHECKS" - DDCollectKubernetesEvents = "DD_COLLECT_KUBERNETES_EVENTS" - DDLeaderLeaseName = "DD_LEADER_LEASE_NAME" - DDClusterAgentTokenName = "DD_CLUSTER_AGENT_TOKEN_NAME" - DDClusterAgentEnabled
= "DD_CLUSTER_AGENT_ENABLED" - DDClusterChecksEnabled = "DD_CLUSTER_CHECKS_ENABLED" - DDCLCRunnerEnabled = "DD_CLC_RUNNER_ENABLED" - DDCLCRunnerHost = "DD_CLC_RUNNER_HOST" - DDCLCRunnerID = "DD_CLC_RUNNER_ID" - DDExtraConfigProviders = "DD_EXTRA_CONFIG_PROVIDERS" - DDEnableMetadataCollection = "DD_ENABLE_METADATA_COLLECTION" - DDDogstatsdEnabled = "DD_USE_DOGSTATSD" - DDHostname = "DD_HOSTNAME" + DatadogHost = "DATADOG_HOST" + DDAdmissionControllerEnabled = "DD_ADMISSION_CONTROLLER_ENABLED" + DDAdmissionControllerInjectConfig = "DD_ADMISSION_CONTROLLER_INJECT_CONFIG_ENABLED" + DDAdmissionControllerInjectConfigMode = "DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE" + DDAdmissionControllerInjectTags = "DD_ADMISSION_CONTROLLER_INJECT_TAGS_ENABLED" + DDAdmissionControllerLocalServiceName = "DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME" + DDAdmissionControllerMutateUnlabelled = "DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED" + DDAdmissionControllerServiceName = "DD_ADMISSION_CONTROLLER_SERVICE_NAME" + DDAPIKey = "DD_API_KEY" + DDAPMEnabled = "DD_APM_ENABLED" + DDAppKey = "DD_APP_KEY" + DDAuthTokenFilePath = "DD_AUTH_TOKEN_FILE_PATH" + DDClcRunnerEnabled = "DD_CLC_RUNNER_ENABLED" + DDClcRunnerHost = "DD_CLC_RUNNER_HOST" + DDClcRunnerID = "DD_CLC_RUNNER_ID" + DDClusterAgentAuthToken = "DD_CLUSTER_AGENT_AUTH_TOKEN" + DDClusterAgentEnabled = "DD_CLUSTER_AGENT_ENABLED" + DDClusterAgentKubeServiceName = "DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME" + DDClusterAgentTokenName = "DD_CLUSTER_AGENT_TOKEN_NAME" + DDClusterChecksEnabled = "DD_CLUSTER_CHECKS_ENABLED" + DDClusterName = "DD_CLUSTER_NAME" + DDCollectKubernetesEvents = "DD_COLLECT_KUBERNETES_EVENTS" + DDComplianceConfigCheckInterval = "DD_COMPLIANCE_CONFIG_CHECK_INTERVAL" + DDComplianceConfigDir = "DD_COMPLIANCE_CONFIG_DIR" + DDComplianceConfigEnabled = "DD_COMPLIANCE_CONFIG_ENABLED" + DDCriSocketPath = "DD_CRI_SOCKET_PATH" + DDddURL = "DD_DD_URL" + DDDogstatsdEnabled = "DD_USE_DOGSTATSD" + DDDogstatsdMapperProfiles = "DD_DOGSTATSD_MAPPER_PROFILES" + DDDogstatsdOriginDetection = "DD_DOGSTATSD_ORIGIN_DETECTION" + DDDogstatsdPort = "DD_DOGSTATSD_PORT" + DDDogstatsdSocket = "DD_DOGSTATSD_SOCKET" + DDEnableMetadataCollection = "DD_ENABLE_METADATA_COLLECTION" + DDEnableOOMKillEnvVar = "DD_SYSTEM_PROBE_CONFIG_ENABLE_OOM_KILL" + DDEnableTCPQueueLengthEnvVar = "DD_SYSTEM_PROBE_CONFIG_ENABLE_TCP_QUEUE_LENGTH" + DDExternalMetricsProviderAPIKey = "DD_EXTERNAL_METRICS_PROVIDER_API_KEY" + DDExternalMetricsProviderAppKey = "DD_EXTERNAL_METRICS_PROVIDER_APP_KEY" + DDExternalMetricsProviderEndpoint = "DD_EXTERNAL_METRICS_PROVIDER_ENDPOINT" + DDExtraConfigProviders = "DD_EXTRA_CONFIG_PROVIDERS" + DDExtraListeners = "DD_EXTRA_LISTENERS" + DDHealthPort = "DD_HEALTH_PORT" + DDHostname = "DD_HOSTNAME" + DDHostRootEnvVar = "HOST_ROOT" + DDIgnoreAutoConf = "DD_IGNORE_AUTOCONF" + DDKubeletCAPath = "DD_KUBELET_CLIENT_CA" + DDKubeletHost = "DD_KUBERNETES_KUBELET_HOST" + DDKubeletTLSVerify = "DD_KUBELET_TLS_VERIFY" + DDKubeStateMetricsCoreConfigMap = "DD_KUBE_STATE_METRICS_CORE_CONFIGMAP_NAME" + DDKubeStateMetricsCoreEnabled = "DD_KUBE_STATE_METRICS_CORE_ENABLED" + DDLeaderElection = "DD_LEADER_ELECTION" + DDLeaderLeaseName = "DD_LEADER_LEASE_NAME" + DDLogLevel = "DD_LOG_LEVEL" + DDLogsConfigContainerCollectAll = "DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL" + DDLogsConfigOpenFilesLimit = "DD_LOGS_CONFIG_OPEN_FILES_LIMIT" + DDLogsContainerCollectUsingFiles = "DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE" + DDLogsEnabled = "DD_LOGS_ENABLED" + DDMetricsProviderEnabled = 
"DD_EXTERNAL_METRICS_PROVIDER_ENABLED" + DDMetricsProviderPort = "DD_EXTERNAL_METRICS_PROVIDER_PORT" + DDMetricsProviderUseDatadogMetric = "DD_EXTERNAL_METRICS_PROVIDER_USE_DATADOGMETRIC_CRD" + DDMetricsProviderWPAController = "DD_EXTERNAL_METRICS_PROVIDER_WPA_CONTROLLER" + DDNamespaceLabelsAsTags = "DD_KUBERNETES_NAMESPACE_LABELS_AS_TAGS" + DDNodeLabelsAsTags = "DD_KUBERNETES_NODE_LABELS_AS_TAGS" + DDPodAnnotationsAsTags = "DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS" + DDPodLabelsAsTags = "DD_KUBERNETES_POD_LABELS_AS_TAGS" + DDPPMReceiverSocket = "DD_APM_RECEIVER_SOCKET" + DDProcessAgentEnabled = "DD_PROCESS_AGENT_ENABLED" + DDPrometheusScrapeChecks = "DD_PROMETHEUS_SCRAPE_CHECKS" + DDPrometheusScrapeEnabled = "DD_PROMETHEUS_SCRAPE_ENABLED" + DDPrometheusScrapeServiceEndpoints = "DD_PROMETHEUS_SCRAPE_SERVICE_ENDPOINTS" + DDRuntimeSecurityConfigEnabled = "DD_RUNTIME_SECURITY_CONFIG_ENABLED" + DDRuntimeSecurityConfigPoliciesDir = "DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR" + DDRuntimeSecurityConfigRemoteTaggerEnabled = "DD_RUNTIME_SECURITY_CONFIG_REMOTE_TAGGER" + DDRuntimeSecurityConfigSocket = "DD_RUNTIME_SECURITY_CONFIG_SOCKET" + DDRuntimeSecurityConfigSyscallMonitorEnabled = "DD_RUNTIME_SECURITY_CONFIG_SYSCALL_MONITOR_ENABLED" + DDSecretBackendCommand = "DD_SECRET_BACKEND_COMMAND" + DDSite = "DD_SITE" + DDSystemProbeAgentEnabled = "DD_SYSTEM_PROBE_ENABLED" + DDSystemProbeBPFDebugEnabled = DDSystemProbeEnvPrefix + "BPF_DEBUG" + DDSystemProbeCollectDNSStatsEnabled = "DD_COLLECT_DNS_STATS" + DDSystemProbeConntrackEnabled = DDSystemProbeEnvPrefix + "ENABLE_CONNTRACK" + DDSystemProbeDebugPort = DDSystemProbeEnvPrefix + "DEBUG_PORT" + DDSystemProbeEnabled = "DD_SYSTEM_PROBE_ENABLED" + DDSystemProbeEnvPrefix = "DD_SYSTEM_PROBE_CONFIG_" + DDSystemProbeExternal = "DD_SYSTEM_PROBE_EXTERNAL" + DDSystemProbeNPMEnabled = "DD_SYSTEM_PROBE_NETWORK_ENABLED" + DDSystemProbeServiceMonitoringEnabled = "DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED" + DDSystemProbeSocket = "DD_SYSPROBE_SOCKET" + DDSystemProbeOOMKillEnabled = DDSystemProbeEnvPrefix + "ENABLE_OOM_KILL" + DDSystemProbeTCPQueueLengthEnabled = DDSystemProbeEnvPrefix + "ENABLE_TCP_QUEUE_LENGTH" + DDTags = "DD_TAGS" + DockerHost = "DOCKER_HOST" // KubernetesEnvvarName Env var used by the Datadog Agent container entrypoint // to add kubelet config provider and listener diff --git a/apis/datadoghq/v1alpha1/const.go b/apis/datadoghq/v1alpha1/const.go index de64a082c..14ebbbde5 100644 --- a/apis/datadoghq/v1alpha1/const.go +++ b/apis/datadoghq/v1alpha1/const.go @@ -7,79 +7,7 @@ package v1alpha1 // Datadog env var names const ( - DatadogHost = "DATADOG_HOST" - DDAPIKey = "DD_API_KEY" - DDSecretBackendCommand = "DD_SECRET_BACKEND_COMMAND" - DDClusterName = "DD_CLUSTER_NAME" - DDSite = "DD_SITE" - DDddURL = "DD_DD_URL" - DDLogLevel = "DD_LOG_LEVEL" - DDNamespaceLabelsAsTags = "DD_KUBERNETES_NAMESPACE_LABELS_AS_TAGS" - DDNodeLabelsAsTags = "DD_KUBERNETES_NODE_LABELS_AS_TAGS" - DDPodLabelsAsTags = "DD_KUBERNETES_POD_LABELS_AS_TAGS" - DDPodAnnotationsAsTags = "DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS" - DDTags = "DD_TAGS" - DDCollectKubeEvents = "DD_COLLECT_KUBERNETES_EVENTS" - DDLeaderElection = "DD_LEADER_ELECTION" - DDLeaderLeaseName = "DD_LEADER_LEASE_NAME" - DDLogsEnabled = "DD_LOGS_ENABLED" - DDLogsConfigContainerCollectAll = "DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL" - DDLogsContainerCollectUsingFiles = "DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE" - DDLogsConfigOpenFilesLimit = "DD_LOGS_CONFIG_OPEN_FILES_LIMIT" - DDDogstatsdOriginDetection = 
"DD_DOGSTATSD_ORIGIN_DETECTION" - DDDogstatsdPort = "DD_DOGSTATSD_PORT" - DDDogstatsdSocket = "DD_DOGSTATSD_SOCKET" - DDDogstatsdMapperProfiles = "DD_DOGSTATSD_MAPPER_PROFILES" - DDClusterAgentAuthToken = "DD_CLUSTER_AGENT_AUTH_TOKEN" - DDClusterAgentTokenName = "DD_CLUSTER_AGENT_TOKEN_NAME" - DDMetricsProviderEnabled = "DD_EXTERNAL_METRICS_PROVIDER_ENABLED" - DDMetricsProviderPort = "DD_EXTERNAL_METRICS_PROVIDER_PORT" - DDMetricsProviderUseDatadogMetric = "DD_EXTERNAL_METRICS_PROVIDER_USE_DATADOGMETRIC_CRD" - DDMetricsProviderWPAController = "DD_EXTERNAL_METRICS_PROVIDER_WPA_CONTROLLER" - DDAppKey = "DD_APP_KEY" - DDExtraListeners = "DD_EXTRA_LISTENERS" - DDHostname = "DD_HOSTNAME" - DDPPMReceiverSocket = "DD_APM_RECEIVER_SOCKET" - DDSystemProbeAgentEnabled = "DD_SYSTEM_PROBE_ENABLED" - DDSystemProbeSocketPath = "DD_SYSPROBE_SOCKET" - DDSystemProbeCollectDNSStatsEnabled = "DD_COLLECT_DNS_STATS" - DDSystemProbeNPMEnabled = "DD_SYSTEM_PROBE_NETWORK_ENABLED" - DDSystemProbeEnvPrefix = "DD_SYSTEM_PROBE_CONFIG_" - DDSystemProbeDebugPort = DDSystemProbeEnvPrefix + "DEBUG_PORT" - DDSystemProbeConntrackEnabled = DDSystemProbeEnvPrefix + "ENABLE_CONNTRACK" - DDSystemProbeBPFDebugEnabled = DDSystemProbeEnvPrefix + "BPF_DEBUG" - DDSystemProbeTCPQueueLengthEnabled = DDSystemProbeEnvPrefix + "ENABLE_TCP_QUEUE_LENGTH" - DDSystemProbeOOMKillEnabled = DDSystemProbeEnvPrefix + "ENABLE_OOM_KILL" - DDKubeletHost = "DD_KUBERNETES_KUBELET_HOST" - DDKubeletTLSVerify = "DD_KUBELET_TLS_VERIFY" - DDKubeletCAPath = "DD_KUBELET_CLIENT_CA" - DDCriSocketPath = "DD_CRI_SOCKET_PATH" - DockerHost = "DOCKER_HOST" - DDAdmissionControllerEnabled = "DD_ADMISSION_CONTROLLER_ENABLED" - DDAdmissionControllerMutateUnlabelled = "DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED" - DDAdmissionControllerInjectConfig = "DD_ADMISSION_CONTROLLER_INJECT_CONFIG_ENABLED" - DDAdmissionControllerInjectTags = "DD_ADMISSION_CONTROLLER_INJECT_TAGS_ENABLED" - DDAdmissionControllerServiceName = "DD_ADMISSION_CONTROLLER_SERVICE_NAME" - DDAdmissionControllerInjectConfigMode = "DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE" - DDAdmissionControllerLocalServiceName = "DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME" - DDComplianceConfigEnabled = "DD_COMPLIANCE_CONFIG_ENABLED" - DDComplianceConfigCheckInterval = "DD_COMPLIANCE_CONFIG_CHECK_INTERVAL" - DDComplianceConfigDir = "DD_COMPLIANCE_CONFIG_DIR" - DDRuntimeSecurityConfigEnabled = "DD_RUNTIME_SECURITY_CONFIG_ENABLED" - DDRuntimeSecurityConfigPoliciesDir = "DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR" - DDRuntimeSecurityConfigSocket = "DD_RUNTIME_SECURITY_CONFIG_SOCKET" - DDRuntimeSecurityConfigSyscallMonitorEnabled = "DD_RUNTIME_SECURITY_CONFIG_SYSCALL_MONITOR_ENABLED" - DDRuntimeSecurityConfigRemoteTaggerEnabled = "DD_RUNTIME_SECURITY_CONFIG_REMOTE_TAGGER" - DDExternalMetricsProviderEndpoint = "DD_EXTERNAL_METRICS_PROVIDER_ENDPOINT" - DDPrometheusScrapeEnabled = "DD_PROMETHEUS_SCRAPE_ENABLED" - DDPrometheusScrapeServiceEndpoints = "DD_PROMETHEUS_SCRAPE_SERVICE_ENDPOINTS" - DDPrometheusScrapeChecks = "DD_PROMETHEUS_SCRAPE_CHECKS" - DDExternalMetricsProviderAPIKey = "DD_EXTERNAL_METRICS_PROVIDER_API_KEY" - DDExternalMetricsProviderAppKey = "DD_EXTERNAL_METRICS_PROVIDER_APP_KEY" - DDAuthTokenFilePath = "DD_AUTH_TOKEN_FILE_PATH" - // Datadog volume names and mount paths - LogDatadogVolumeName = "logdatadog" LogDatadogVolumePath = "/var/log/datadog" TmpVolumeName = "tmp" diff --git a/apis/datadoghq/v1alpha1/secret.go b/apis/datadoghq/v1alpha1/secret.go index 5d90ab6de..b643b30d0 100644 --- 
a/apis/datadoghq/v1alpha1/secret.go +++ b/apis/datadoghq/v1alpha1/secret.go @@ -11,6 +11,55 @@ import ( apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" ) +// GetDefaultCredentialsSecretName returns the default name for credentials secret +func GetDefaultCredentialsSecretName(dda *DatadogAgent) string { + return dda.Name +} + +// GetAPIKeySecret returns the API key secret name and the key inside the secret +// returns <isSet>, secretName, secretKey +func GetAPIKeySecret(credentials *DatadogCredentials, defaultName string) (bool, string, string) { + if credentials.APISecret != nil { + if credentials.APISecret.KeyName != "" { + return true, credentials.APISecret.SecretName, credentials.APISecret.KeyName + } + + return true, credentials.APISecret.SecretName, apicommon.DefaultAPIKeyKey + } + + if credentials.APIKeyExistingSecret != "" { + return true, credentials.APIKeyExistingSecret, apicommon.DefaultAPIKeyKey + } + + if credentials.APIKey != "" { + return true, defaultName, apicommon.DefaultAPIKeyKey + } + + return false, defaultName, apicommon.DefaultAPIKeyKey +} + +// GetAppKeySecret returns the APP key secret name and the key inside the secret +// returns <isSet>, secretName, secretKey +func GetAppKeySecret(credentials *DatadogCredentials, defaultName string) (bool, string, string) { + if credentials.APPSecret != nil { + if credentials.APPSecret.KeyName != "" { + return true, credentials.APPSecret.SecretName, credentials.APPSecret.KeyName + } + + return true, credentials.APPSecret.SecretName, apicommon.DefaultAPPKeyKey + } + + if credentials.AppKeyExistingSecret != "" { + return true, credentials.AppKeyExistingSecret, apicommon.DefaultAPPKeyKey + } + + if credentials.AppKey != "" { + return true, defaultName, apicommon.DefaultAPPKeyKey + } + + return false, defaultName, apicommon.DefaultAPPKeyKey +} + // GetKeysFromCredentials returns any key data that need to be stored in a new secret func GetKeysFromCredentials(credentials *DatadogCredentials) map[string][]byte { data := make(map[string][]byte) diff --git a/apis/datadoghq/v1alpha1/utils.go b/apis/datadoghq/v1alpha1/utils.go index 22f9bad8d..77ffe5fa5 100644 --- a/apis/datadoghq/v1alpha1/utils.go +++ b/apis/datadoghq/v1alpha1/utils.go @@ -27,10 +27,13 @@ func GetConfName(owner metav1.Object, conf *CustomConfigSpec, defaultName string return fmt.Sprintf("%s-%s", owner.GetName(), defaultName) } -// GetAgentServiceAccount returns the node agent serviceAccountName +// GetAgentServiceAccount returns the agent serviceAccountName func GetAgentServiceAccount(dda *DatadogAgent) string { saDefault := fmt.Sprintf("%s-%s", dda.Name, common.DefaultAgentResourceSuffix) + if dda.Spec.Agent.Rbac != nil && dda.Spec.Agent.Rbac.ServiceAccountName != nil { + return *dda.Spec.Agent.Rbac.ServiceAccountName + } return saDefault } diff --git a/apis/datadoghq/v2alpha1/datadogagent_default.go b/apis/datadoghq/v2alpha1/datadogagent_default.go index 9f6cb1916..7d07f07e6 100644 --- a/apis/datadoghq/v2alpha1/datadogagent_default.go +++ b/apis/datadoghq/v2alpha1/datadogagent_default.go @@ -6,6 +6,7 @@ package v2alpha1 import ( + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" apiutils "github.com/DataDog/datadog-operator/apis/utils" ) @@ -13,7 +14,6 @@ import ( // Note: many default values are set in the Datadog Agent and deliberately not set by the Operator.
const ( defaultSite string = "datadoghq.com" - defaultRegistry string = "gcr.io/datadoghq" defaultLogLevel string = "info" // defaultLogCollectionEnabled bool = false @@ -94,7 +94,7 @@ func defaultGlobalConfig(ddaSpec *DatadogAgentSpec) { } if ddaSpec.Global.Registry == nil { - ddaSpec.Global.Registry = apiutils.NewStringPointer(defaultRegistry) + ddaSpec.Global.Registry = apiutils.NewStringPointer(apicommon.DefaultImageRegistry) } if ddaSpec.Global.LogLevel == nil { diff --git a/apis/datadoghq/v2alpha1/datadogagent_default_test.go b/apis/datadoghq/v2alpha1/datadogagent_default_test.go index 87e24dc67..5b1a5ef95 100644 --- a/apis/datadoghq/v2alpha1/datadogagent_default_test.go +++ b/apis/datadoghq/v2alpha1/datadogagent_default_test.go @@ -11,6 +11,7 @@ import ( "github.com/google/go-cmp/cmp" assert "github.com/stretchr/testify/require" + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" apiutils "github.com/DataDog/datadog-operator/apis/utils" ) @@ -33,7 +34,7 @@ func Test_defaultGlobal(t *testing.T) { want: &DatadogAgentSpec{ Global: &GlobalConfig{ Site: apiutils.NewStringPointer(defaultSite), - Registry: apiutils.NewStringPointer(defaultRegistry), + Registry: apiutils.NewStringPointer(apicommon.DefaultImageRegistry), LogLevel: apiutils.NewStringPointer(defaultLogLevel), }, }, diff --git a/apis/datadoghq/v2alpha1/secret.go b/apis/datadoghq/v2alpha1/secret.go new file mode 100644 index 000000000..22c7a293e --- /dev/null +++ b/apis/datadoghq/v2alpha1/secret.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package v2alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetDefaultCredentialsSecretName returns the default name for credentials secret +func GetDefaultCredentialsSecretName(dda metav1.Object) string { + return dda.GetName() +} + +// GetDefaultDCATokenSecretName returns the default name for cluster-agent secret +func GetDefaultDCATokenSecretName(dda metav1.Object) string { + return fmt.Sprintf("%s-token", dda.GetName()) +} diff --git a/apis/datadoghq/v2alpha1/utils.go b/apis/datadoghq/v2alpha1/utils.go index 1aabee5fe..5c5cd9d63 100644 --- a/apis/datadoghq/v2alpha1/utils.go +++ b/apis/datadoghq/v2alpha1/utils.go @@ -32,6 +32,14 @@ func GetClusterAgentServiceAccount(dda *DatadogAgent) string { return saDefault } +// GetAgentServiceAccount returns the agent service account name +func GetAgentServiceAccount(dda *DatadogAgent) string { + saDefault := fmt.Sprintf("%s-%s", dda.Name, common.DefaultAgentResourceSuffix) + + // Todo: implement the support of override + return saDefault +} + // GetClusterChecksRunnerServiceAccount return the cluster-checks-runner service account name func GetClusterChecksRunnerServiceAccount(dda *DatadogAgent) string { saDefault := fmt.Sprintf("%s-%s", dda.Name, common.DefaultClusterChecksRunnerResourceSuffix) diff --git a/config/test-v2/kustomization.yaml b/config/test-v2/kustomization.yaml new file mode 100644 index 000000000..47cd55c6c --- /dev/null +++ b/config/test-v2/kustomization.yaml @@ -0,0 +1,86 @@ +# Adds namespace to all resources. +namespace: system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". 
+# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: datadog-operator- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +bases: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +# [V2alpha1 testing] configure v2alpha1 as the storage version +patches: +- path: storagev2_in_datadogagents.yaml + target: + group: apiextensions.k8s.io + version: v1 + kind: CustomResourceDefinition + name: datadogagents.datadoghq.com +- path: manager_enablev2_patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: manager + namespace: system + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +#- manager_auth_proxy_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace +- name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml +- name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace +- name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service diff --git a/config/test-v2/manager_auth_proxy_patch.yaml b/config/test-v2/manager_auth_proxy_patch.yaml new file mode 100644 index 000000000..02b5c2f4f --- /dev/null +++ b/config/test-v2/manager_auth_proxy_patch.yaml @@ -0,0 +1,25 @@ +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: manager + args: + - "--metrics-addr=127.0.0.1:8080" + - "--enable-leader-election" diff --git a/config/test-v2/manager_enablev2_patch.yaml b/config/test-v2/manager_enablev2_patch.yaml new file mode 100644 index 000000000..2d07aa4cb --- /dev/null +++ b/config/test-v2/manager_enablev2_patch.yaml @@ -0,0 +1,7 @@ +# The following patch configures the manager to enable the v2alpha1 API and disable operator metrics. +- op: add + path: /spec/template/spec/containers/0/args/- + value: -v2APIEnabled=true +- op: add + path: /spec/template/spec/containers/0/args/- + value: -operatorMetricsEnabled=false diff --git a/config/test-v2/manager_webhook_patch.yaml b/config/test-v2/manager_webhook_patch.yaml new file mode 100644 index 000000000..ab553c007 --- /dev/null +++ b/config/test-v2/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/test-v2/storagev2_in_datadogagents.yaml b/config/test-v2/storagev2_in_datadogagents.yaml new file mode 100644 index 000000000..173307d8a --- /dev/null +++ b/config/test-v2/storagev2_in_datadogagents.yaml @@ -0,0 +1,10 @@ +# The following patch configures the DatadogAgents CRD to use v2alpha1 as the storage version. +- op: replace + path: /spec/versions/0/storage + value: false +- op: replace + path: /spec/versions/1/storage + value: true +- op: replace + path: /spec/versions/1/served + value: true diff --git a/config/test-v2/webhookcainjection_patch.yaml b/config/test-v2/webhookcainjection_patch.yaml new file mode 100644 index 000000000..a649bb950 --- /dev/null +++ b/config/test-v2/webhookcainjection_patch.yaml @@ -0,0 +1,15 @@ +# This patch adds annotations to the admission webhook configurations and +# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
+apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) \ No newline at end of file diff --git a/controllers/datadogagent/agent_test.go b/controllers/datadogagent/agent_test.go index 08d407046..a2c5e1872 100644 --- a/controllers/datadogagent/agent_test.go +++ b/controllers/datadogagent/agent_test.go @@ -957,47 +957,47 @@ func defaultSystemProbeEnvVars() []corev1.EnvVar { }, }, { - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigEnabled, + Name: apicommon.DDRuntimeSecurityConfigEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigRemoteTaggerEnabled, + Name: apicommon.DDRuntimeSecurityConfigRemoteTaggerEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigSyscallMonitorEnabled, + Name: apicommon.DDRuntimeSecurityConfigSyscallMonitorEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeDebugPort, + Name: apicommon.DDSystemProbeDebugPort, Value: "0", }, { - Name: datadoghqv1alpha1.DDSystemProbeSocketPath, + Name: apicommon.DDSystemProbeSocket, Value: "/var/run/sysprobe/sysprobe.sock", }, { - Name: datadoghqv1alpha1.DDSystemProbeNPMEnabled, + Name: apicommon.DDSystemProbeNPMEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeConntrackEnabled, + Name: apicommon.DDSystemProbeConntrackEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeBPFDebugEnabled, + Name: apicommon.DDSystemProbeBPFDebugEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeCollectDNSStatsEnabled, + Name: apicommon.DDSystemProbeCollectDNSStatsEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, + Name: apicommon.DDSystemProbeTCPQueueLengthEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, + Name: apicommon.DDSystemProbeOOMKillEnabled, Value: "false", }, } @@ -1174,14 +1174,14 @@ func defaultSystemProbePodSpec(dda *datadoghqv1alpha1.DatadogAgent) corev1.PodSp }, }...) - agentEnvVars := addEnvVar(defaultEnvVars(nil), datadoghqv1alpha1.DDSystemProbeSocketPath, filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock")) + agentEnvVars := addEnvVar(defaultEnvVars(nil), apicommon.DDSystemProbeSocket, filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock")) agentEnvVars = append(agentEnvVars, []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, + Name: apicommon.DDSystemProbeTCPQueueLengthEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, + Name: apicommon.DDSystemProbeOOMKillEnabled, Value: "false", }, }...) @@ -1294,14 +1294,14 @@ func noSeccompInstallSystemProbeSpec(dda *datadoghqv1alpha1.DatadogAgent) corev1 SubPath: "system-probe.yaml", }, }...) 
- agentEnvVars := addEnvVar(defaultEnvVars(nil), datadoghqv1alpha1.DDSystemProbeSocketPath, filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock")) + agentEnvVars := addEnvVar(defaultEnvVars(nil), apicommon.DDSystemProbeSocket, filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock")) agentEnvVars = append(agentEnvVars, []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, + Name: apicommon.DDSystemProbeTCPQueueLengthEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, + Name: apicommon.DDSystemProbeOOMKillEnabled, Value: "false", }, }...) @@ -1592,12 +1592,12 @@ func defaultOrchestratorEnvVars(dda *datadoghqv1alpha1.DatadogAgent) []corev1.En func runtimeSecurityAgentPodSpec(extraEnv map[string]string, extraDir string) corev1.PodSpec { systemProbeEnv := defaultSystemProbeEnvVars() - systemProbeEnv = addEnvVar(systemProbeEnv, datadoghqv1alpha1.DDAuthTokenFilePath, "/etc/datadog-agent/auth/token") - systemProbeEnv = addEnvVar(systemProbeEnv, datadoghqv1alpha1.DDRuntimeSecurityConfigEnabled, "true") - systemProbeEnv = addEnvVar(systemProbeEnv, datadoghqv1alpha1.DDRuntimeSecurityConfigPoliciesDir, "/etc/datadog-agent/runtime-security.d") - systemProbeEnv = addEnvVar(systemProbeEnv, datadoghqv1alpha1.DDRuntimeSecurityConfigRemoteTaggerEnabled, "true") - systemProbeEnv = addEnvVar(systemProbeEnv, datadoghqv1alpha1.DDRuntimeSecurityConfigSocket, "/var/run/sysprobe/runtime-security.sock") - systemProbeEnv = addEnvVar(systemProbeEnv, datadoghqv1alpha1.DDRuntimeSecurityConfigSyscallMonitorEnabled, "true") + systemProbeEnv = addEnvVar(systemProbeEnv, apicommon.DDAuthTokenFilePath, "/etc/datadog-agent/auth/token") + systemProbeEnv = addEnvVar(systemProbeEnv, apicommon.DDRuntimeSecurityConfigEnabled, "true") + systemProbeEnv = addEnvVar(systemProbeEnv, apicommon.DDRuntimeSecurityConfigPoliciesDir, "/etc/datadog-agent/runtime-security.d") + systemProbeEnv = addEnvVar(systemProbeEnv, apicommon.DDRuntimeSecurityConfigRemoteTaggerEnabled, "true") + systemProbeEnv = addEnvVar(systemProbeEnv, apicommon.DDRuntimeSecurityConfigSocket, "/var/run/sysprobe/runtime-security.sock") + systemProbeEnv = addEnvVar(systemProbeEnv, apicommon.DDRuntimeSecurityConfigSyscallMonitorEnabled, "true") agentWithSystemProbeVolumeMounts := []corev1.VolumeMount{} agentWithSystemProbeVolumeMounts = append(agentWithSystemProbeVolumeMounts, defaultMountVolume()...) @@ -1613,14 +1613,14 @@ func runtimeSecurityAgentPodSpec(extraEnv map[string]string, extraDir string) co SubPath: "system-probe.yaml", }, }...) - agentEnvVars := addEnvVar(defaultEnvVars(extraEnv), datadoghqv1alpha1.DDSystemProbeSocketPath, filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock")) + agentEnvVars := addEnvVar(defaultEnvVars(extraEnv), apicommon.DDSystemProbeSocket, filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock")) agentEnvVars = append(agentEnvVars, []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, + Name: apicommon.DDSystemProbeTCPQueueLengthEnabled, Value: "false", }, { - Name: datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, + Name: apicommon.DDSystemProbeOOMKillEnabled, Value: "false", }, }...) 
@@ -2071,7 +2071,7 @@ func Test_newExtendedDaemonSetFromInstance(t *testing.T) { hostPortNetworkPodSpec.Containers[0].Ports[0].ContainerPort = 12345 hostPortNetworkPodSpec.Containers[0].Ports[0].HostPort = 12345 hostPortNetworkPodSpec.Containers[0].Env = append(hostPortNetworkPodSpec.Containers[0].Env, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDDogstatsdPort, + Name: apicommon.DDDogstatsdPort, Value: strconv.Itoa(12345), }) @@ -3187,14 +3187,14 @@ func Test_newExtendedDaemonSetFromInstance_SystemProbe(t *testing.T) { } oomKillSpec := systemProbeExtraMountsSpec.DeepCopy() - addEnvVar(oomKillSpec.Containers[0].Env, datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, "true") - addEnvVar(oomKillSpec.Containers[1].Env, datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, "true") - addEnvVar(oomKillSpec.InitContainers[1].Env, datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, "true") + addEnvVar(oomKillSpec.Containers[0].Env, apicommon.DDSystemProbeOOMKillEnabled, "true") + addEnvVar(oomKillSpec.Containers[1].Env, apicommon.DDSystemProbeOOMKillEnabled, "true") + addEnvVar(oomKillSpec.InitContainers[1].Env, apicommon.DDSystemProbeOOMKillEnabled, "true") tpcQueueLengthSpec := systemProbeExtraMountsSpec.DeepCopy() - addEnvVar(tpcQueueLengthSpec.Containers[0].Env, datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, "true") - addEnvVar(tpcQueueLengthSpec.Containers[1].Env, datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, "true") - addEnvVar(tpcQueueLengthSpec.InitContainers[1].Env, datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, "true") + addEnvVar(tpcQueueLengthSpec.Containers[0].Env, apicommon.DDSystemProbeTCPQueueLengthEnabled, "true") + addEnvVar(tpcQueueLengthSpec.Containers[1].Env, apicommon.DDSystemProbeTCPQueueLengthEnabled, "true") + addEnvVar(tpcQueueLengthSpec.InitContainers[1].Env, apicommon.DDSystemProbeTCPQueueLengthEnabled, "true") ddaOOMKill := test.NewDefaultedDatadogAgent("bar", "foo", &test.NewDatadogAgentOptions{ UseEDS: true, diff --git a/controllers/datadogagent/clusteragent.go b/controllers/datadogagent/clusteragent.go index 65d13c74c..a27e2bec5 100644 --- a/controllers/datadogagent/clusteragent.go +++ b/controllers/datadogagent/clusteragent.go @@ -29,6 +29,7 @@ import ( apiutils "github.com/DataDog/datadog-operator/apis/utils" "github.com/DataDog/datadog-operator/controllers/datadogagent/common" "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" "github.com/DataDog/datadog-operator/controllers/datadogagent/merger" "github.com/DataDog/datadog-operator/controllers/datadogagent/object" objectvolume "github.com/DataDog/datadog-operator/controllers/datadogagent/object/volume" @@ -41,8 +42,6 @@ import ( "github.com/DataDog/datadog-operator/pkg/kubernetes" "github.com/DataDog/datadog-operator/pkg/kubernetes/rbac" - componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" - "github.com/DataDog/datadog-operator/controllers/datadogagent/feature" ) @@ -509,22 +508,22 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado }, { Name: apicommon.DDClusterAgentKubeServiceName, - Value: component.GetClusterAgentServiceName(dda), + Value: componentdca.GetClusterAgentServiceName(dda), }, { - Name: datadoghqv1alpha1.DDLeaderElection, + Name: apicommon.DDLeaderElection, Value: "true", }, { - Name: datadoghqv1alpha1.DDLeaderLeaseName, - Value: utils.GetDatadogLeaderElectionResourceName(dda.Name), + Name: 
apicommon.DDLeaderLeaseName, + Value: utils.GetDatadogLeaderElectionResourceName(dda), }, { - Name: datadoghqv1alpha1.DDComplianceConfigEnabled, + Name: apicommon.DDComplianceConfigEnabled, Value: strconv.FormatBool(complianceEnabled), }, { - Name: datadoghqv1alpha1.DDCollectKubeEvents, + Name: apicommon.DDCollectKubernetesEvents, Value: apiutils.BoolToString(spec.ClusterAgent.Config.CollectEvents), }, { @@ -532,19 +531,19 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado Value: strconv.Itoa(int(*spec.ClusterAgent.Config.HealthPort)), }, { - Name: datadoghqv1alpha1.DDClusterAgentTokenName, - Value: utils.GetDatadogTokenResourceName(dda.Name), + Name: apicommon.DDClusterAgentTokenName, + Value: utils.GetDatadogTokenResourceName(dda), }, } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterAgentAuthToken, + Name: apicommon.DDClusterAgentAuthToken, ValueFrom: getClusterAgentAuthToken(dda), }) if spec.ClusterName != "" { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterName, + Name: apicommon.DDClusterName, Value: spec.ClusterName, }) } @@ -552,14 +551,14 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado if complianceEnabled { if dda.Spec.Agent.Security.Compliance.CheckInterval != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDComplianceConfigCheckInterval, + Name: apicommon.DDComplianceConfigCheckInterval, Value: strconv.FormatInt(dda.Spec.Agent.Security.Compliance.CheckInterval.Nanoseconds(), 10), }) } if dda.Spec.Agent.Security.Compliance.ConfigDir != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDComplianceConfigDir, + Name: apicommon.DDComplianceConfigDir, Value: datadoghqv1alpha1.SecurityAgentComplianceConfigDirVolumePath, }) } @@ -568,71 +567,71 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado // TODO We should be able to disable the agent and still configure the Endpoint for the Cluster Agent. 
if apiutils.BoolValue(dda.Spec.Agent.Enabled) && spec.Agent.Config.DDUrl != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDddURL, + Name: apicommon.DDddURL, Value: *spec.Agent.Config.DDUrl, }) } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDLogLevel, + Name: apicommon.DDLogLevel, Value: *spec.ClusterAgent.Config.LogLevel, }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAPIKey, + Name: apicommon.DDAPIKey, ValueFrom: getAPIKeyFromSecret(dda), }) if spec.Site != "" { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDSite, + Name: apicommon.DDSite, Value: spec.Site, }) } if isMetricsProviderEnabled(spec.ClusterAgent) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDMetricsProviderEnabled, + Name: apicommon.DDMetricsProviderEnabled, Value: strconv.FormatBool(*spec.ClusterAgent.Config.ExternalMetrics.Enabled), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDMetricsProviderPort, + Name: apicommon.DDMetricsProviderPort, Value: strconv.Itoa(int(getClusterAgentMetricsProviderPort(*spec.ClusterAgent.Config))), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAppKey, + Name: apicommon.DDAppKey, ValueFrom: getAppKeyFromSecret(dda), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDMetricsProviderUseDatadogMetric, + Name: apicommon.DDMetricsProviderUseDatadogMetric, Value: strconv.FormatBool(spec.ClusterAgent.Config.ExternalMetrics.UseDatadogMetrics), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDMetricsProviderWPAController, + Name: apicommon.DDMetricsProviderWPAController, Value: strconv.FormatBool(spec.ClusterAgent.Config.ExternalMetrics.WpaController), }) externalMetricsEndpoint := dda.Spec.ClusterAgent.Config.ExternalMetrics.Endpoint if externalMetricsEndpoint != nil && *externalMetricsEndpoint != "" { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDExternalMetricsProviderEndpoint, + Name: apicommon.DDExternalMetricsProviderEndpoint, Value: *externalMetricsEndpoint, }) } if hasMetricsProviderCustomCredentials(spec.ClusterAgent) { - apiSet, secretName, secretKey := utils.GetAPIKeySecret(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, getDefaultExternalMetricSecretName(dda)) + apiSet, secretName, secretKey := datadoghqv1alpha1.GetAPIKeySecret(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, getDefaultExternalMetricSecretName(dda)) if apiSet { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDExternalMetricsProviderAPIKey, + Name: apicommon.DDExternalMetricsProviderAPIKey, ValueFrom: buildEnvVarFromSecret(secretName, secretKey), }) } - appSet, secretName, secretKey := utils.GetAppKeySecret(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, getDefaultExternalMetricSecretName(dda)) + appSet, secretName, secretKey := datadoghqv1alpha1.GetAppKeySecret(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, getDefaultExternalMetricSecretName(dda)) if appSet { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDExternalMetricsProviderAppKey, + Name: apicommon.DDExternalMetricsProviderAppKey, ValueFrom: buildEnvVarFromSecret(secretName, secretKey), }) } @@ -647,7 +646,7 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado Value: datadoghqv1alpha1.KubeServicesAndEndpointsConfigProviders, }, { - Name: datadoghqv1alpha1.DDExtraListeners, + Name: apicommon.DDExtraListeners, Value: 
datadoghqv1alpha1.KubeServicesAndEndpointsListeners, }, }...) @@ -655,25 +654,25 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado if isAdmissionControllerEnabled(spec.ClusterAgent) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAdmissionControllerEnabled, + Name: apicommon.DDAdmissionControllerEnabled, Value: strconv.FormatBool(*spec.ClusterAgent.Config.AdmissionController.Enabled), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAdmissionControllerMutateUnlabelled, + Name: apicommon.DDAdmissionControllerMutateUnlabelled, Value: apiutils.BoolToString(spec.ClusterAgent.Config.AdmissionController.MutateUnlabelled), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAdmissionControllerServiceName, + Name: apicommon.DDAdmissionControllerServiceName, Value: getAdmissionControllerServiceName(dda), }) if spec.ClusterAgent.Config.AdmissionController.AgentCommunicationMode != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAdmissionControllerInjectConfigMode, + Name: apicommon.DDAdmissionControllerInjectConfigMode, Value: *spec.ClusterAgent.Config.AdmissionController.AgentCommunicationMode, }) } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAdmissionControllerLocalServiceName, + Name: apicommon.DDAdmissionControllerLocalServiceName, Value: getAgentServiceName(dda), }) } @@ -693,7 +692,7 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado } func getClusterAgentName(dda *datadoghqv1alpha1.DatadogAgent) string { - name := component.GetClusterAgentName(dda) + name := componentdca.GetClusterAgentName(dda) if isClusterAgentEnabled(dda.Spec.ClusterAgent) && dda.Spec.ClusterAgent.DeploymentName != "" { name = dda.Spec.ClusterAgent.DeploymentName } @@ -739,7 +738,7 @@ func (r *Reconciler) manageClusterAgentRBACs(logger logr.Logger, dda *datadoghqv return reconcile.Result{}, err } - rbacResourcesName := getClusterAgentRbacResourcesName(dda) + rbacResourcesName := componentdca.GetClusterAgentRbacResourcesName(dda) // Create or update ClusterRole clusterRole := &rbacv1.ClusterRole{} @@ -882,7 +881,7 @@ func (r *Reconciler) updateIfNeededClusterChecksRunnerClusterRole(logger logr.Lo // cleanupClusterAgentRbacResources deletes ClusterRole, ClusterRoleBindings, and ServiceAccount of the Cluster Agent func (r *Reconciler) cleanupClusterAgentRbacResources(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) { - rbacResourcesName := getClusterAgentRbacResourcesName(dda) + rbacResourcesName := componentdca.GetClusterAgentRbacResourcesName(dda) // Delete ClusterRole if result, err := r.cleanupClusterRole(logger, dda, rbacResourcesName); err != nil { return result, err @@ -989,7 +988,7 @@ func buildClusterRole(dda *datadoghqv1alpha1.DatadogAgent, needClusterLevelRBAC if needClusterLevelRBAC { // Cluster Agent is disabled, the Agent needs extra permissions // to collect cluster level metrics and events - rbacRules = append(rbacRules, getDefaultClusterAgentPolicyRules(dda)...) + rbacRules = append(rbacRules, componentdca.GetDefaultClusterAgentClusterRolePolicyRules(dda)...) if apiutils.BoolValue(dda.Spec.Agent.Enabled) { if apiutils.BoolValue(dda.Spec.Agent.Config.CollectEvents) { @@ -997,7 +996,7 @@ func buildClusterRole(dda *datadoghqv1alpha1.DatadogAgent, needClusterLevelRBAC } if apiutils.BoolValue(dda.Spec.Agent.Config.LeaderElection) { - rbacRules = append(rbacRules, getLeaderElectionPolicyRule(dda)...) 
+ rbacRules = append(rbacRules, componentdca.GetLeaderElectionPolicyRule(dda)...) } } } @@ -1007,40 +1006,6 @@ func buildClusterRole(dda *datadoghqv1alpha1.DatadogAgent, needClusterLevelRBAC return clusterRole } -// getDefaultClusterAgentPolicyRules returns the default policy rules for the Cluster Agent -// Can be used by the Agent if the Cluster Agent is disabled -func getDefaultClusterAgentPolicyRules(dda *datadoghqv1alpha1.DatadogAgent) []rbacv1.PolicyRule { - return append([]rbacv1.PolicyRule{ - { - APIGroups: []string{rbac.CoreAPIGroup}, - Resources: []string{ - rbac.ServicesResource, - rbac.EventsResource, - rbac.EndpointsResource, - rbac.PodsResource, - rbac.NodesResource, - rbac.ComponentStatusesResource, - rbac.ConfigMapsResource, - rbac.NamespaceResource, - }, - Verbs: []string{ - rbac.GetVerb, - rbac.ListVerb, - rbac.WatchVerb, - }, - }, - { - APIGroups: []string{rbac.OpenShiftQuotaAPIGroup}, - Resources: []string{rbac.ClusterResourceQuotasResource}, - Verbs: []string{rbac.GetVerb, rbac.ListVerb}, - }, - { - NonResourceURLs: []string{rbac.VersionURL, rbac.HealthzURL}, - Verbs: []string{rbac.GetVerb}, - }, - }, getLeaderElectionPolicyRule(dda)...) -} - // buildClusterRoleBinding creates a ClusterRoleBinding object func buildClusterRoleBinding(dda *datadoghqv1alpha1.DatadogAgent, info roleBindingInfo, agentVersion string) *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{ @@ -1072,23 +1037,7 @@ func buildClusterAgentClusterRole(dda *datadoghqv1alpha1.DatadogAgent, name, age }, } - rbacRules := getDefaultClusterAgentPolicyRules(dda) - - rbacRules = append(rbacRules, rbacv1.PolicyRule{ - // Horizontal Pod Autoscaling - APIGroups: []string{rbac.AutoscalingAPIGroup}, - Resources: []string{rbac.HorizontalPodAutoscalersRecource}, - Verbs: []string{rbac.ListVerb, rbac.WatchVerb}, - }) - - rbacRules = append(rbacRules, rbacv1.PolicyRule{ - APIGroups: []string{rbac.CoreAPIGroup}, - Resources: []string{rbac.NamespaceResource}, - ResourceNames: []string{ - common.KubeSystemResourceName, - }, - Verbs: []string{rbac.GetVerb}, - }) + rbacRules := componentdca.GetDefaultClusterAgentClusterRolePolicyRules(dda) if apiutils.BoolValue(dda.Spec.ClusterAgent.Config.CollectEvents) { rbacRules = append(rbacRules, getEventCollectionPolicyRule(dda)) @@ -1327,7 +1276,7 @@ func buildClusterAgentRole(dda *datadoghqv1alpha1.DatadogAgent, name, agentVersi }, } - rbacRules := getLeaderElectionPolicyRule(dda) + rbacRules := componentdca.GetLeaderElectionPolicyRule(dda) rbacRules = append(rbacRules, rbacv1.PolicyRule{ APIGroups: []string{rbac.CoreAPIGroup}, diff --git a/controllers/datadogagent/clusteragent_test.go b/controllers/datadogagent/clusteragent_test.go index b5fcae02b..46fa7dfea 100644 --- a/controllers/datadogagent/clusteragent_test.go +++ b/controllers/datadogagent/clusteragent_test.go @@ -902,11 +902,11 @@ func Test_newClusterAgentDeploymentFromInstance_MetricsServer(t *testing.T) { ValueFrom: appKeyValue(), }, { - Name: datadoghqv1alpha1.DDMetricsProviderUseDatadogMetric, + Name: apicommon.DDMetricsProviderUseDatadogMetric, Value: "false", }, { - Name: datadoghqv1alpha1.DDMetricsProviderWPAController, + Name: apicommon.DDMetricsProviderWPAController, Value: "false", }, { @@ -950,23 +950,23 @@ func Test_newClusterAgentDeploymentFromInstance_MetricsServer(t *testing.T) { ValueFrom: appKeyValue(), }, { - Name: datadoghqv1alpha1.DDMetricsProviderUseDatadogMetric, + Name: apicommon.DDMetricsProviderUseDatadogMetric, Value: "true", }, { - Name: 
datadoghqv1alpha1.DDMetricsProviderWPAController, + Name: apicommon.DDMetricsProviderWPAController, Value: "true", }, { - Name: datadoghqv1alpha1.DDExternalMetricsProviderEndpoint, + Name: apicommon.DDExternalMetricsProviderEndpoint, Value: "https://app.datadoghq.eu", }, { - Name: datadoghqv1alpha1.DDExternalMetricsProviderAPIKey, + Name: apicommon.DDExternalMetricsProviderAPIKey, ValueFrom: buildEnvVarFromSecret("foo-metrics-server", "api_key"), }, { - Name: datadoghqv1alpha1.DDExternalMetricsProviderAppKey, + Name: apicommon.DDExternalMetricsProviderAppKey, ValueFrom: buildEnvVarFromSecret("extmetrics-app-key-secret-name", "appkey"), }, { diff --git a/controllers/datadogagent/clusterchecksrunner.go b/controllers/datadogagent/clusterchecksrunner.go index 5dd5507e3..d5db26531 100644 --- a/controllers/datadogagent/clusterchecksrunner.go +++ b/controllers/datadogagent/clusterchecksrunner.go @@ -27,6 +27,7 @@ import ( datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" apiutils "github.com/DataDog/datadog-operator/apis/utils" "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" "github.com/DataDog/datadog-operator/controllers/datadogagent/object" objectvolume "github.com/DataDog/datadog-operator/controllers/datadogagent/object/volume" "github.com/DataDog/datadog-operator/controllers/datadogagent/orchestrator" @@ -327,7 +328,7 @@ func getEnvVarsForClusterChecksRunner(dda *datadoghqv1alpha1.DatadogAgent) []cor }, { Name: apicommon.DDClusterAgentKubeServiceName, - Value: component.GetClusterAgentServiceName(dda), + Value: componentdca.GetClusterAgentServiceName(dda), }, { Name: apicommon.DDExtraConfigProviders, @@ -358,11 +359,11 @@ func getEnvVarsForClusterChecksRunner(dda *datadoghqv1alpha1.DatadogAgent) []cor Value: "false", }, { - Name: apicommon.DDCLCRunnerEnabled, + Name: apicommon.DDClcRunnerEnabled, Value: "true", }, { - Name: apicommon.DDCLCRunnerHost, + Name: apicommon.DDClcRunnerHost, ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: apicommon.FieldPathStatusPodIP, @@ -370,7 +371,7 @@ func getEnvVarsForClusterChecksRunner(dda *datadoghqv1alpha1.DatadogAgent) []cor }, }, { - Name: datadoghqv1alpha1.DDHostname, + Name: apicommon.DDHostname, ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: apicommon.FieldPathSpecNodeName, @@ -378,7 +379,7 @@ func getEnvVarsForClusterChecksRunner(dda *datadoghqv1alpha1.DatadogAgent) []cor }, }, { - Name: apicommon.DDCLCRunnerID, + Name: apicommon.DDClcRunnerID, ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: apicommon.FieldPathMetaName, @@ -388,31 +389,31 @@ func getEnvVarsForClusterChecksRunner(dda *datadoghqv1alpha1.DatadogAgent) []cor } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAPIKey, + Name: apicommon.DDAPIKey, ValueFrom: getAPIKeyFromSecret(dda), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterAgentAuthToken, + Name: apicommon.DDClusterAgentAuthToken, ValueFrom: getClusterAgentAuthToken(dda), }) if spec.ClusterName != "" { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterName, + Name: apicommon.DDClusterName, Value: spec.ClusterName, }) } if spec.Site != "" { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDSite, + Name: apicommon.DDSite, Value: spec.Site, }) } envVars = append(envVars, 
corev1.EnvVar{ - Name: datadoghqv1alpha1.DDLogLevel, + Name: apicommon.DDLogLevel, Value: *spec.ClusterChecksRunner.Config.LogLevel, }) @@ -427,7 +428,7 @@ func getEnvVarsForClusterChecksRunner(dda *datadoghqv1alpha1.DatadogAgent) []cor if spec.Agent.Config.DDUrl != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDddURL, + Name: apicommon.DDddURL, Value: *spec.Agent.Config.DDUrl, }) } diff --git a/controllers/datadogagent/clusterchecksrunner_test.go b/controllers/datadogagent/clusterchecksrunner_test.go index 9880a3dde..fe67fdd1c 100644 --- a/controllers/datadogagent/clusterchecksrunner_test.go +++ b/controllers/datadogagent/clusterchecksrunner_test.go @@ -218,7 +218,7 @@ func clusterChecksRunnerDefaultEnvVars() []corev1.EnvVar { }, }, { - Name: datadoghqv1alpha1.DDHostname, + Name: apicommon.DDHostname, ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: apicommon.FieldPathSpecNodeName, diff --git a/controllers/datadogagent/common_rbac.go b/controllers/datadogagent/common_rbac.go index 62ac3f4f1..e985f6291 100644 --- a/controllers/datadogagent/common_rbac.go +++ b/controllers/datadogagent/common_rbac.go @@ -3,7 +3,9 @@ package datadogagent import ( "context" + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" "github.com/DataDog/datadog-operator/controllers/datadogagent/common" + componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" "github.com/DataDog/datadog-operator/controllers/datadogagent/feature/kubernetesstatecore" "github.com/DataDog/datadog-operator/controllers/datadogagent/object" "github.com/DataDog/datadog-operator/pkg/kubernetes" @@ -75,32 +77,12 @@ func getEventCollectionPolicyRule(dda *datadoghqv1alpha1.DatadogAgent) rbacv1.Po Resources: []string{rbac.ConfigMapsResource}, ResourceNames: []string{ common.DatadogTokenOldResourceName, // Kept for backward compatibility with agent <7.37.0 - utils.GetDatadogTokenResourceName(dda.Name), + utils.GetDatadogTokenResourceName(dda), }, Verbs: []string{rbac.GetVerb, rbac.UpdateVerb}, } } -// getLeaderElectionPolicyRule returns the policy rules for leader election -func getLeaderElectionPolicyRule(dda *datadoghqv1alpha1.DatadogAgent) []rbacv1.PolicyRule { - return []rbacv1.PolicyRule{ - { - APIGroups: []string{rbac.CoreAPIGroup}, - Resources: []string{rbac.ConfigMapsResource}, - ResourceNames: []string{ - common.DatadogLeaderElectionOldResourceName, // Kept for backward compatibility with agent <7.37.0 - utils.GetDatadogLeaderElectionResourceName(dda.Name), - }, - Verbs: []string{rbac.GetVerb, rbac.UpdateVerb}, - }, - { - APIGroups: []string{rbac.CoreAPIGroup}, - Resources: []string{rbac.ConfigMapsResource}, - Verbs: []string{rbac.CreateVerb}, - }, - } -} - func (r *Reconciler) createServiceAccount(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, name, agentVersion string) (reconcile.Result, error) { serviceAccount := buildServiceAccount(dda, name, agentVersion) if err := controllerutil.SetControllerReference(dda, serviceAccount, r.scheme); err != nil { @@ -293,7 +275,7 @@ func isOwnerBasedOnLabels(dda *datadoghqv1alpha1.DatadogAgent, labels map[string func rbacNamesForDda(dda *datadoghqv1alpha1.DatadogAgent, versionInfo *version.Info) []string { return []string{ getAgentRbacResourcesName(dda), - getClusterAgentRbacResourcesName(dda), + componentdca.GetClusterAgentRbacResourcesName(dda), getClusterChecksRunnerRbacResourcesName(dda), getHPAClusterRoleBindingName(dda), 
getExternalMetricsReaderClusterRoleName(dda, versionInfo), @@ -324,7 +306,7 @@ func isClusterRolesBindingEqual(a, b *rbacv1.ClusterRoleBinding) bool { func checkSecretBackendMultipleProvidersUsed(envVarList []corev1.EnvVar) bool { for _, envVar := range envVarList { - if envVar.Name == datadoghqv1alpha1.DDSecretBackendCommand && envVar.Value == secretBackendMultipleProvidersScript { + if envVar.Name == apicommon.DDSecretBackendCommand && envVar.Value == secretBackendMultipleProvidersScript { return true } } diff --git a/controllers/datadogagent/component/clusteragent/default.go b/controllers/datadogagent/component/clusteragent/default.go index fec2f4016..b0abc8560 100644 --- a/controllers/datadogagent/component/clusteragent/default.go +++ b/controllers/datadogagent/component/clusteragent/default.go @@ -11,18 +11,22 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" apiutils "github.com/DataDog/datadog-operator/apis/utils" + "github.com/DataDog/datadog-operator/controllers/datadogagent/common" "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + "github.com/DataDog/datadog-operator/pkg/controller/utils" "github.com/DataDog/datadog-operator/pkg/defaulting" + "github.com/DataDog/datadog-operator/pkg/kubernetes/rbac" ) // NewDefaultClusterAgentDeployment return a new default cluster-agent deployment func NewDefaultClusterAgentDeployment(dda metav1.Object) *appsv1.Deployment { - deployment := component.NewDeployment(dda, apicommon.DefaultClusterAgentResourceSuffix, component.GetClusterAgentName(dda), component.GetClusterAgentVersion(dda), nil) + deployment := component.NewDeployment(dda, apicommon.DefaultClusterAgentResourceSuffix, GetClusterAgentName(dda), GetClusterAgentVersion(dda), nil) podTemplate := NewDefaultClusterAgentPodTemplateSpec(dda) for key, val := range deployment.GetLabels() { @@ -86,7 +90,7 @@ func defaultPodSpec(dda metav1.Object, volumes []corev1.Volume, volumeMounts []c Containers: []corev1.Container{ { Name: string(apicommonv1.ClusterAgentContainerName), - Image: fmt.Sprintf("%s:%s", apicommon.DefaultClusterAgentImageName, defaulting.ClusterAgentLatestVersion), + Image: fmt.Sprintf("%s/%s:%s", apicommon.DefaultImageRegistry, apicommon.DefaultClusterAgentImageName, defaulting.ClusterAgentLatestVersion), Ports: []corev1.ContainerPort{ { ContainerPort: 5005, @@ -118,7 +122,7 @@ func defaultEnvVars(dda metav1.Object) []corev1.EnvVar { envVars := []corev1.EnvVar{ { Name: apicommon.DDClusterAgentKubeServiceName, - Value: component.GetClusterAgentServiceName(dda), + Value: GetClusterAgentServiceName(dda), }, { Name: apicommon.DDLeaderElection, @@ -152,3 +156,88 @@ func DefaultAffinity() *corev1.Affinity { }, } } + +// GetDefaultClusterAgentRolePolicyRules returns the default policy rules for the Cluster Agent +// Can be used by the Agent if the Cluster Agent is disabled +func GetDefaultClusterAgentRolePolicyRules(dda metav1.Object) []rbacv1.PolicyRule { + rules := []rbacv1.PolicyRule{} + + rules = append(rules, GetLeaderElectionPolicyRule(dda)...) 
+ rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{rbac.CoreAPIGroup}, + Resources: []string{rbac.ConfigMapsResource}, + ResourceNames: []string{ + common.DatadogClusterIDResourceName, + }, + Verbs: []string{rbac.GetVerb, rbac.UpdateVerb, rbac.CreateVerb}, + }) + return rules +} + +// GetDefaultClusterAgentClusterRolePolicyRules returns the default policy rules for the Cluster Agent +// Can be used by the Agent if the Cluster Agent is disabled +func GetDefaultClusterAgentClusterRolePolicyRules(dda metav1.Object) []rbacv1.PolicyRule { + return []rbacv1.PolicyRule{ + { + APIGroups: []string{rbac.CoreAPIGroup}, + Resources: []string{ + rbac.ServicesResource, + rbac.EventsResource, + rbac.EndpointsResource, + rbac.PodsResource, + rbac.NodesResource, + rbac.ComponentStatusesResource, + rbac.ConfigMapsResource, + rbac.NamespaceResource, + }, + Verbs: []string{ + rbac.GetVerb, + rbac.ListVerb, + rbac.WatchVerb, + }, + }, + { + APIGroups: []string{rbac.OpenShiftQuotaAPIGroup}, + Resources: []string{rbac.ClusterResourceQuotasResource}, + Verbs: []string{rbac.GetVerb, rbac.ListVerb}, + }, + { + NonResourceURLs: []string{rbac.VersionURL, rbac.HealthzURL}, + Verbs: []string{rbac.GetVerb}, + }, + { + // Horizontal Pod Autoscaling + APIGroups: []string{rbac.AutoscalingAPIGroup}, + Resources: []string{rbac.HorizontalPodAutoscalersRecource}, + Verbs: []string{rbac.ListVerb, rbac.WatchVerb}, + }, + { + APIGroups: []string{rbac.CoreAPIGroup}, + Resources: []string{rbac.NamespaceResource}, + ResourceNames: []string{ + common.KubeSystemResourceName, + }, + Verbs: []string{rbac.GetVerb}, + }, + } +} + +// GetLeaderElectionPolicyRule returns the policy rules for leader election +func GetLeaderElectionPolicyRule(dda metav1.Object) []rbacv1.PolicyRule { + return []rbacv1.PolicyRule{ + { + APIGroups: []string{rbac.CoreAPIGroup}, + Resources: []string{rbac.ConfigMapsResource}, + ResourceNames: []string{ + common.DatadogLeaderElectionOldResourceName, // Kept for backward compatibility with agent <7.37.0 + utils.GetDatadogLeaderElectionResourceName(dda), + }, + Verbs: []string{rbac.GetVerb, rbac.UpdateVerb}, + }, + { + APIGroups: []string{rbac.CoreAPIGroup}, + Resources: []string{rbac.ConfigMapsResource}, + Verbs: []string{rbac.CreateVerb}, + }, + } +} diff --git a/controllers/datadogagent/component/clusteragent/utils.go b/controllers/datadogagent/component/clusteragent/utils.go new file mode 100644 index 000000000..d882fa46f --- /dev/null +++ b/controllers/datadogagent/component/clusteragent/utils.go @@ -0,0 +1,34 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
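Editor-added sketch, not part of this patch: one way the exported RBAC helpers above could be consumed to build a namespaced Role for the Cluster Agent. The `example` package and `buildClusterAgentRole` function are hypothetical names.

package example // hypothetical package, for illustration only

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent"
)

// buildClusterAgentRole wraps the default rules into a Role object; the
// returned rules already include the leader-election policy rules.
func buildClusterAgentRole(dda metav1.Object) *rbacv1.Role {
	return &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: dda.GetNamespace(),
			Name:      clusteragent.GetClusterAgentRbacResourcesName(dda),
		},
		Rules: clusteragent.GetDefaultClusterAgentRolePolicyRules(dda),
	}
}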
+ +package clusteragent + +import ( + "fmt" + + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetClusterAgentServiceName returns the Cluster-Agent service name based on the DatadogAgent name +func GetClusterAgentServiceName(dda metav1.Object) string { + return fmt.Sprintf("%s-%s", dda.GetName(), apicommon.DefaultClusterAgentResourceSuffix) +} + +// GetClusterAgentName returns the Cluster-Agent name based on the DatadogAgent name +func GetClusterAgentName(dda metav1.Object) string { + return fmt.Sprintf("%s-%s", dda.GetName(), apicommon.DefaultClusterAgentResourceSuffix) +} + +// GetClusterAgentVersion returns the Cluster-Agent version based on the DatadogAgent info +func GetClusterAgentVersion(dda metav1.Object) string { + // TODO: implement this function + return "" +} + +// GetClusterAgentRbacResourcesName returns the Cluster-Agent RBAC resource name +func GetClusterAgentRbacResourcesName(dda metav1.Object) string { + return fmt.Sprintf("%s-%s", dda.GetName(), apicommon.DefaultClusterAgentResourceSuffix) +} diff --git a/controllers/datadogagent/component/clusterchecksrunner/default.go b/controllers/datadogagent/component/clusterchecksrunner/default.go index f762f4ef8..c611d56e8 100644 --- a/controllers/datadogagent/component/clusterchecksrunner/default.go +++ b/controllers/datadogagent/component/clusterchecksrunner/default.go @@ -17,6 +17,7 @@ import ( apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" apiutils "github.com/DataDog/datadog-operator/apis/utils" "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" "github.com/DataDog/datadog-operator/pkg/defaulting" ) @@ -127,7 +128,7 @@ func defaultEnvVars(dda metav1.Object) []corev1.EnvVar { envVars := []corev1.EnvVar{ { Name: apicommon.DDClusterAgentKubeServiceName, - Value: component.GetClusterAgentServiceName(dda), + Value: componentdca.GetClusterAgentServiceName(dda), }, { Name: apicommon.DDClusterChecksEnabled, @@ -154,11 +155,11 @@ func defaultEnvVars(dda metav1.Object) []corev1.EnvVar { Value: "false", }, { - Name: apicommon.DDCLCRunnerEnabled, + Name: apicommon.DDClcRunnerEnabled, Value: "true", }, { - Name: apicommon.DDCLCRunnerHost, + Name: apicommon.DDClcRunnerHost, ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: apicommon.FieldPathStatusPodIP, @@ -166,7 +167,7 @@ func defaultEnvVars(dda metav1.Object) []corev1.EnvVar { }, }, { - Name: apicommon.DDCLCRunnerID, + Name: apicommon.DDClcRunnerID, ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: apicommon.FieldPathMetaName, diff --git a/controllers/datadogagent/component/utils.go b/controllers/datadogagent/component/utils.go index f555b14fe..5ec2555fd 100644 --- a/controllers/datadogagent/component/utils.go +++ b/controllers/datadogagent/component/utils.go @@ -266,3 +266,23 @@ func GetAgentVersion(dda metav1.Object) string { func GetClusterChecksRunnerName(dda metav1.Object) string { return fmt.Sprintf("%s-%s", dda.GetName(), apicommon.DefaultClusterChecksRunnerResourceSuffix) } + +// BuildEnvVarFromSource returns a *corev1.EnvVar built from an env var name and a *corev1.EnvVarSource +func BuildEnvVarFromSource(name string, source *corev1.EnvVarSource) *corev1.EnvVar { + return &corev1.EnvVar{ + Name: name, + ValueFrom: source, + } +} + +// BuildEnvVarFromSecret returns a corev1.EnvVarSource corresponding to a
secret reference +func BuildEnvVarFromSecret(name, key string) *corev1.EnvVarSource { + return &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + Key: key, + }, + } +} diff --git a/controllers/datadogagent/controller.go b/controllers/datadogagent/controller.go index 8576bf994..71d54f64d 100644 --- a/controllers/datadogagent/controller.go +++ b/controllers/datadogagent/controller.go @@ -166,8 +166,9 @@ func (r *Reconciler) reconcileInstance(ctx context.Context, logger logr.Logger, storeOptions := &dependencies.StoreOptions{ SupportCilium: r.options.SupportCilium, Logger: logger, + Scheme: r.scheme, } - depsStore := dependencies.NewStore(storeOptions) + depsStore := dependencies.NewStore(instance, storeOptions) resourcesManager := feature.NewResourceManagers(depsStore) var errs []error for _, feat := range features { diff --git a/controllers/datadogagent/controller_reconcile_v2.go b/controllers/datadogagent/controller_reconcile_v2.go index f16a373a4..944a21865 100644 --- a/controllers/datadogagent/controller_reconcile_v2.go +++ b/controllers/datadogagent/controller_reconcile_v2.go @@ -94,8 +94,9 @@ func (r *Reconciler) reconcileInstanceV2(ctx context.Context, logger logr.Logger storeOptions := &dependencies.StoreOptions{ SupportCilium: r.options.SupportCilium, Logger: logger, + Scheme: r.scheme, } - depsStore := dependencies.NewStore(storeOptions) + depsStore := dependencies.NewStore(instance, storeOptions) resourcesManager := feature.NewResourceManagers(depsStore) var errs []error for id, feat := range features { @@ -181,6 +182,9 @@ func (r *Reconciler) updateStatusIfNeededV2(logger logr.Logger, agentdeployment func (r *Reconciler) finalizeDadV2(reqLogger logr.Logger, obj client.Object) { dda := obj.(*datadoghqv2alpha1.DatadogAgent) - r.forwarders.Unregister(dda) + if r.options.OperatorMetricsEnabled { + r.forwarders.Unregister(dda) + } + reqLogger.Info("Successfully finalized DatadogAgent") } diff --git a/controllers/datadogagent/dependencies/store.go b/controllers/datadogagent/dependencies/store.go index 08bfa6b18..3ed7be0b9 100644 --- a/controllers/datadogagent/dependencies/store.go +++ b/controllers/datadogagent/dependencies/store.go @@ -11,6 +11,8 @@ import ( "strings" "sync" + "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + "github.com/DataDog/datadog-operator/controllers/datadogagent/object" "github.com/DataDog/datadog-operator/pkg/equality" "github.com/DataDog/datadog-operator/pkg/kubernetes" "github.com/go-logr/logr" @@ -19,6 +21,7 @@ import ( apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,19 +34,21 @@ const ( // StoreClient dependencies store client interface type StoreClient interface { - AddOrUpdate(kind kubernetes.ObjectKind, obj client.Object) + AddOrUpdate(kind kubernetes.ObjectKind, obj client.Object) error Get(kind kubernetes.ObjectKind, namespace, name string) (client.Object, bool) GetOrCreate(kind kubernetes.ObjectKind, namespace, name string) (client.Object, bool) } // NewStore returns a new Store instance -func NewStore(options *StoreOptions) *Store { +func NewStore(owner metav1.Object, options *StoreOptions) *Store { store := &Store{ - deps: make(map[kubernetes.ObjectKind]map[string]client.Object), + deps: 
make(map[kubernetes.ObjectKind]map[string]client.Object), + owner: owner, } if options != nil { store.supportCilium = options.SupportCilium store.logger = options.Logger + store.scheme = options.Scheme } return store @@ -57,20 +62,23 @@ type Store struct { supportCilium bool + scheme *runtime.Scheme logger logr.Logger + owner metav1.Object } // StoreOptions use to provide to NewStore() function some Store creation options. type StoreOptions struct { SupportCilium bool + Scheme *runtime.Scheme Logger logr.Logger } // AddOrUpdate used to add or update an object in the Store // kind correspond to the object kind, and id can be `namespace/name` identifier of just // `name` if we are talking about a cluster scope object like `ClusterRole`. -func (ds *Store) AddOrUpdate(kind kubernetes.ObjectKind, obj client.Object) { +func (ds *Store) AddOrUpdate(kind kubernetes.ObjectKind, obj client.Object) error { ds.mutex.Lock() defer ds.mutex.Unlock() @@ -83,14 +91,42 @@ func (ds *Store) AddOrUpdate(kind kubernetes.ObjectKind, obj client.Object) { obj.SetLabels(map[string]string{}) } obj.GetLabels()[operatorStoreLabelKey] = "true" + + if ds.owner != nil { + defaultLabels := object.GetDefaultLabels(ds.owner, ds.owner.GetName(), component.GetAgentVersion(ds.owner)) + if len(defaultLabels) > 0 { + for key, val := range defaultLabels { + obj.GetLabels()[key] = val + } + } + + defaultAnnotations := object.GetDefaultAnnotations(ds.owner) + if len(defaultAnnotations) > 0 { + if obj.GetAnnotations() == nil { + obj.SetAnnotations(map[string]string{}) + } + for key, val := range defaultAnnotations { + obj.GetAnnotations()[key] = val + } + } + + // Owner-reference should not be added to cluster level objects + if kind != kubernetes.ClusterRoleBindingKind && kind != kubernetes.ClusterRolesKind { + if err := object.SetOwnerReference(ds.owner, obj, ds.scheme); err != nil { + return fmt.Errorf("store.AddOrUpdate, %w", err) + } + } + } + ds.deps[kind][id] = obj + return nil } // AddOrUpdateStore used to add or update an object in the Store // kind correspond to the object kind, and id can be `namespace/name` identifier of just // `name` if we are talking about a cluster scope object like `ClusterRole`. 
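Editor-added usage sketch, not part of this patch, of the owner-aware store API introduced here; the `example` package, `addConfigMap` function, and ConfigMap name are hypothetical.

package example // hypothetical package, for illustration only

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/go-logr/logr"

	"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
	"github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies"
	"github.com/DataDog/datadog-operator/pkg/kubernetes"
)

// addConfigMap stores a ConfigMap; the store decorates it with default labels,
// annotations and an owner reference pointing at the DatadogAgent.
func addConfigMap(dda *v2alpha1.DatadogAgent, scheme *runtime.Scheme, logger logr.Logger) error {
	store := dependencies.NewStore(dda, &dependencies.StoreOptions{Scheme: scheme, Logger: logger})
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: dda.GetNamespace(), Name: "example-config"},
		Data:       map[string]string{"key": "value"},
	}
	// AddOrUpdate now returns an error because setting the owner reference can fail.
	return store.AddOrUpdate(kubernetes.ConfigMapKind, cm)
}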
func (ds *Store) AddOrUpdateStore(kind kubernetes.ObjectKind, obj client.Object) *Store { - ds.AddOrUpdate(kind, obj) + _ = ds.AddOrUpdate(kind, obj) return ds } diff --git a/controllers/datadogagent/dependencies/store_test.go b/controllers/datadogagent/dependencies/store_test.go index 389198cb4..913b9f545 100644 --- a/controllers/datadogagent/dependencies/store_test.go +++ b/controllers/datadogagent/dependencies/store_test.go @@ -10,10 +10,12 @@ import ( "reflect" "testing" + "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" "github.com/DataDog/datadog-operator/pkg/kubernetes" assert "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" @@ -118,6 +120,16 @@ func TestStore_AddOrUpdate(t *testing.T) { }, } + owner := &v2alpha1.DatadogAgent{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "bar", + Name: "foo", + }, + } + + testScheme := runtime.NewScheme() + testScheme.AddKnownTypes(v2alpha1.GroupVersion, &v2alpha1.DatadogAgent{}) + type fields struct { deps map[kubernetes.ObjectKind]map[string]client.Object } @@ -130,6 +142,7 @@ func TestStore_AddOrUpdate(t *testing.T) { fields fields args args validationFunc func(t *testing.T, store *Store) + wantErr bool }{ { name: "add to an empty store", @@ -190,9 +203,14 @@ func TestStore_AddOrUpdate(t *testing.T) { logger := logf.Log.WithName(t.Name()) ds := &Store{ deps: tt.fields.deps, + owner: owner, + scheme: testScheme, logger: logger, } - ds.AddOrUpdate(tt.args.kind, tt.args.obj) + gotErr := ds.AddOrUpdate(tt.args.kind, tt.args.obj) + if gotErr != nil && tt.wantErr == false { + t.Errorf("Store.AddOrUpdate() gotErr = %v, wantErr %v", gotErr, tt.wantErr) + } tt.validationFunc(t, ds) }) } @@ -333,7 +351,6 @@ func TestStore_Apply(t *testing.T) { { name: "one ConfigMap to apply", fields: fields{ - deps: map[kubernetes.ObjectKind]map[string]client.Object{ kubernetes.ConfigMapKind: { "bar/foo": dummyConfigMap1.DeepCopy(), @@ -348,7 +365,6 @@ func TestStore_Apply(t *testing.T) { { name: "one ConfigMap to update", fields: fields{ - deps: map[kubernetes.ObjectKind]map[string]client.Object{ kubernetes.ConfigMapKind: { "bar/foo": dummyConfigMap1bis.DeepCopy(), diff --git a/controllers/datadogagent/feature/cspm/feature.go b/controllers/datadogagent/feature/cspm/feature.go index 0fb060ad2..fa2212d18 100644 --- a/controllers/datadogagent/feature/cspm/feature.go +++ b/controllers/datadogagent/feature/cspm/feature.go @@ -50,7 +50,7 @@ type cspmFeature struct { func (f *cspmFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { f.owner = dda - if dda.Spec.Features.CSPM != nil && apiutils.BoolValue(dda.Spec.Features.CSPM.Enabled) { + if dda.Spec.Features != nil && dda.Spec.Features.CSPM != nil && apiutils.BoolValue(dda.Spec.Features.CSPM.Enabled) { f.enable = true f.serviceAccountName = v2alpha1.GetClusterAgentServiceAccount(dda) @@ -156,14 +156,14 @@ func (f *cspmFeature) ManageClusterAgent(managers feature.PodTemplateManagers) e } enabledEnvVar := &corev1.EnvVar{ - Name: apicommon.DDComplianceEnabled, + Name: apicommon.DDComplianceConfigEnabled, Value: "true", } managers.EnvVar().AddEnvVarToContainer(apicommonv1.ClusterAgentContainerName, enabledEnvVar) if f.checkInterval != "" { intervalEnvVar := &corev1.EnvVar{ - Name: apicommon.DDComplianceCheckInterval, + Name: 
apicommon.DDComplianceConfigCheckInterval, Value: f.checkInterval, } managers.EnvVar().AddEnvVarToContainer(apicommonv1.ClusterAgentContainerName, intervalEnvVar) @@ -224,7 +224,7 @@ func (f *cspmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) erro // env vars enabledEnvVar := &corev1.EnvVar{ - Name: apicommon.DDComplianceEnabled, + Name: apicommon.DDComplianceConfigEnabled, Value: "true", } managers.EnvVar().AddEnvVarToContainer(apicommonv1.SecurityAgentContainerName, enabledEnvVar) @@ -237,7 +237,7 @@ func (f *cspmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) erro if f.checkInterval != "" { intervalEnvVar := &corev1.EnvVar{ - Name: apicommon.DDComplianceCheckInterval, + Name: apicommon.DDComplianceConfigCheckInterval, Value: f.checkInterval, } managers.EnvVar().AddEnvVarToContainer(apicommonv1.SecurityAgentContainerName, intervalEnvVar) diff --git a/controllers/datadogagent/feature/cspm/feature_test.go b/controllers/datadogagent/feature/cspm/feature_test.go index 0b316c7cf..bb721e797 100644 --- a/controllers/datadogagent/feature/cspm/feature_test.go +++ b/controllers/datadogagent/feature/cspm/feature_test.go @@ -87,11 +87,11 @@ func Test_cspmFeature_Configure(t *testing.T) { want := []*corev1.EnvVar{ { - Name: apicommon.DDComplianceEnabled, + Name: apicommon.DDComplianceConfigEnabled, Value: "true", }, { - Name: apicommon.DDComplianceCheckInterval, + Name: apicommon.DDComplianceConfigCheckInterval, Value: "1200000000000", }, } @@ -130,7 +130,7 @@ func Test_cspmFeature_Configure(t *testing.T) { want := []*corev1.EnvVar{ { - Name: apicommon.DDComplianceEnabled, + Name: apicommon.DDComplianceConfigEnabled, Value: "true", }, { @@ -138,7 +138,7 @@ func Test_cspmFeature_Configure(t *testing.T) { Value: apicommon.HostRootMountPath, }, { - Name: apicommon.DDComplianceCheckInterval, + Name: apicommon.DDComplianceConfigCheckInterval, Value: "1200000000000", }, } diff --git a/controllers/datadogagent/feature/enabledefault/feature.go b/controllers/datadogagent/feature/enabledefault/feature.go index 4baede6a0..d6ad0345b 100644 --- a/controllers/datadogagent/feature/enabledefault/feature.go +++ b/controllers/datadogagent/feature/enabledefault/feature.go @@ -6,9 +6,20 @@ package enabledefault import ( + "fmt" + + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" + "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" "github.com/DataDog/datadog-operator/controllers/datadogagent/feature" + "github.com/DataDog/datadog-operator/pkg/kubernetes" + "github.com/DataDog/datadog-operator/pkg/version" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/errors" ) func init() { @@ -19,13 +30,112 @@ func init() { } func buildDefaultFeature(options *feature.Options) feature.Feature { - return &defaultFeature{} + return &defaultFeature{ + credentialsInfo: credentialsInfo{ + secretCreation: secretInfo{ + data: make(map[string]string), + }, + }, + dcaTokenInfo: dcaTokenInfo{ + secretCreation: secretInfo{ + data: make(map[string]string), + }, + }, + } +} + +type defaultFeature struct { + namespace string + owner metav1.Object + + credentialsInfo credentialsInfo + dcaTokenInfo dcaTokenInfo + clusterAgent clusterAgentConfig + agent agentConfig + clusterCheckRunner 
clusterCheckRunnerConfig +} + +type credentialsInfo struct { + apiKey keyInfo + appKey keyInfo + secretCreation secretInfo +} + +type dcaTokenInfo struct { + token keyInfo + secretCreation secretInfo +} + +type keyInfo struct { + SecretName string + SecretKey string +} + +type secretInfo struct { + createSecret bool + name string + data map[string]string +} + +type clusterAgentConfig struct { + serviceAccountName string +} + +type agentConfig struct { + serviceAccountName string } -type defaultFeature struct{} +type clusterCheckRunnerConfig struct { + serviceAccountName string +} func (f *defaultFeature) Configure(dda *v2alpha1.DatadogAgent) feature.RequiredComponents { trueValue := true + f.namespace = dda.Namespace + f.owner = dda + + f.clusterAgent.serviceAccountName = v2alpha1.GetClusterAgentServiceAccount(dda) + f.agent.serviceAccountName = v2alpha1.GetAgentServiceAccount(dda) + f.clusterCheckRunner.serviceAccountName = v2alpha1.GetClusterChecksRunnerServiceAccount(dda) + + if dda.Spec.Global != nil { + if dda.Spec.Global.Credentials != nil { + creds := dda.Spec.Global.Credentials + + if creds.APIKey != nil || creds.AppKey != nil { + f.credentialsInfo.secretCreation.createSecret = true + f.credentialsInfo.secretCreation.name = v2alpha1.GetDefaultCredentialsSecretName(dda) + } + + if creds.APIKey != nil { + f.credentialsInfo.secretCreation.data[apicommon.DefaultAPIKeyKey] = *creds.APIKey + f.credentialsInfo.apiKey.SecretName = f.credentialsInfo.secretCreation.name + f.credentialsInfo.apiKey.SecretKey = apicommon.DefaultAPIKeyKey + } else if creds.APISecret != nil { + f.credentialsInfo.apiKey.SecretName = creds.APISecret.SecretName + f.credentialsInfo.apiKey.SecretKey = creds.APISecret.KeyName + } + + if creds.AppKey != nil { + f.credentialsInfo.secretCreation.data[apicommon.DefaultAPPKeyKey] = *creds.AppKey + f.credentialsInfo.appKey.SecretName = f.credentialsInfo.secretCreation.name + f.credentialsInfo.appKey.SecretKey = apicommon.DefaultAPPKeyKey + } else if creds.AppSecret != nil { + f.credentialsInfo.appKey.SecretName = creds.AppSecret.SecretName + f.credentialsInfo.appKey.SecretKey = creds.AppSecret.KeyName + } + } + + // DCA Token management + f.dcaTokenInfo.token.SecretKey = apicommon.DefaultTokenKey + f.dcaTokenInfo.token.SecretName = v2alpha1.GetDefaultDCATokenSecretName(dda) + if dda.Spec.Global.ClusterAgentToken != nil { + f.dcaTokenInfo.secretCreation.createSecret = true + f.dcaTokenInfo.secretCreation.name = f.dcaTokenInfo.token.SecretName + f.dcaTokenInfo.secretCreation.data[apicommon.DefaultTokenKey] = *dda.Spec.Global.ClusterAgentToken + } + } + return feature.RequiredComponents{ ClusterAgent: feature.RequiredComponent{ IsRequired: &trueValue, @@ -37,26 +147,177 @@ func (f *defaultFeature) Configure(dda *v2alpha1.DatadogAgent) feature.RequiredC } func (f *defaultFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) feature.RequiredComponents { - trueValue := true - return feature.RequiredComponents{ - ClusterAgent: feature.RequiredComponent{ - IsRequired: &trueValue, - }, - Agent: feature.RequiredComponent{ - IsRequired: &trueValue, - }, - } + /* + trueValue := true + f.owner = dda + f.namespace = dda.GetNamespace() + + required := feature.RequiredComponents{ + ClusterAgent: feature.RequiredComponent{ + IsRequired: &trueValue, + }, + Agent: feature.RequiredComponent{ + IsRequired: &trueValue, + }, + } + + f.clusterAgent.serviceAccountName = v1alpha1.GetClusterAgentServiceAccount(dda) + f.agent.serviceAccountName = v1alpha1.GetAgentServiceAccount(dda) + 
f.clusterCheckRunner.serviceAccountName = v1alpha1.GetClusterChecksRunnerServiceAccount(dda) + + // get info about credentials + // If API key, app key _and_ token don't need a new secret, then don't create one. + if dda.Spec.Credentials != nil && + (!v1alpha1.CheckAPIKeySufficiency(&dda.Spec.Credentials.DatadogCredentials, config.DDAPIKeyEnvVar) || + !v1alpha1.CheckAppKeySufficiency(&dda.Spec.Credentials.DatadogCredentials, config.DDAppKeyEnvVar)) { + f.credentialsInfo.secretCreation.createSecret = true + f.credentialsInfo.secretCreation.name = v1alpha1.GetDefaultCredentialsSecretName(dda) + + creds := dda.Spec.Credentials + if creds.APIKey != "" { + f.credentialsInfo.secretCreation.data[apicommon.DefaultAPIKeyKey] = creds.APIKey + } + if creds.AppKey != "" { + f.credentialsInfo.secretCreation.data[apicommon.DefaultAPPKeyKey] = creds.AppKey + } + + // TOKEN management + f.dcaTokenInfo.secretCreation.createSecret = true + f.dcaTokenInfo.secretCreation.name = v1alpha1.GetDefaultCredentialsSecretName(dda) + f.dcaTokenInfo.token.SecretName = f.dcaTokenInfo.secretCreation.name + f.dcaTokenInfo.token.SecretKey = apicommon.DefaultTokenKey + if creds.Token != "" { + f.dcaTokenInfo.secretCreation.data[apicommon.DefaultTokenKey] = creds.Token + } else if apiutils.BoolValue(dda.Spec.ClusterAgent.Enabled) { + defaultedToken := v1alpha1.DefaultedClusterAgentToken(&dda.Status) + if defaultedToken != "" { + f.dcaTokenInfo.secretCreation.data[apicommon.DefaultTokenKey] = defaultedToken + } + } + } + */ + // Do not apply this feature on v1alpha1: it breaks the unit tests in `controller_test.go`, + // because the `store` adds extra labels to the dependency resources, + // which makes the comparison fail. + required := feature.RequiredComponents{} + + return required } // ManageDependencies allows a feature to manage its dependencies. // Feature's dependencies should be added in the store.
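Editor-added sketch, not part of this patch, showing where the default feature stores inline credentials and the Cluster Agent token, as wired in Configure above; the `example` package and function name are hypothetical.

package example // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
)

// printDefaultSecretNames prints the secret names used when the API/app keys
// and the Cluster Agent token are provided inline in the DatadogAgent spec.
func printDefaultSecretNames(dda *v2alpha1.DatadogAgent) {
	fmt.Println("credentials secret:", v2alpha1.GetDefaultCredentialsSecretName(dda))
	fmt.Println("DCA token secret:", v2alpha1.GetDefaultDCATokenSecretName(dda))
}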
func (f *defaultFeature) ManageDependencies(managers feature.ResourceManagers, components feature.RequiredComponents) error { - return nil + var errs []error + // manage credential secret + if f.credentialsInfo.secretCreation.createSecret { + for key, value := range f.credentialsInfo.secretCreation.data { + if err := managers.SecretManager().AddSecret(f.namespace, f.credentialsInfo.secretCreation.name, key, value); err != nil { + errs = append(errs, err) + } + } + if components.ClusterAgent.IsEnabled() && f.dcaTokenInfo.secretCreation.createSecret { + for key, value := range f.dcaTokenInfo.secretCreation.data { + if err := managers.SecretManager().AddSecret(f.namespace, f.dcaTokenInfo.secretCreation.name, key, value); err != nil { + errs = append(errs, err) + } + } + } + } + + // Create install-info configmap + installInfoCM := buildInstallInfoConfigMap(f.owner) + if err := managers.Store().AddOrUpdate(kubernetes.ConfigMapKind, installInfoCM); err != nil { + return err + } + + if components.Agent.IsEnabled() { + if err := f.agentDependencies(managers, components.Agent); err != nil { + errs = append(errs, err) + } + } + + if components.ClusterAgent.IsEnabled() { + if err := f.clusterAgentDependencies(managers, components.ClusterAgent); err != nil { + errs = append(errs, err) + } + } + + if components.ClusterChecksRunner.IsEnabled() { + if err := f.clusterChecksRunnerDependencies(managers, components.ClusterChecksRunner); err != nil { + errs = append(errs, err) + } + } + + return errors.NewAggregate(errs) +} + +func (f *defaultFeature) agentDependencies(managers feature.ResourceManagers, component feature.RequiredComponent) error { + _ = component + var errs []error + // serviceAccount + if f.agent.serviceAccountName != "" { + if err := managers.RBACManager().AddServiceAccount(f.namespace, f.agent.serviceAccountName); err != nil { + errs = append(errs, err) + } + } + return errors.NewAggregate(errs) +} + +func (f *defaultFeature) clusterAgentDependencies(managers feature.ResourceManagers, component feature.RequiredComponent) error { + _ = component + var errs []error + // serviceAccount + if f.clusterAgent.serviceAccountName != "" { + // Service Account creation + if err := managers.RBACManager().AddServiceAccount(f.namespace, f.clusterAgent.serviceAccountName); err != nil { + errs = append(errs, err) + } + + // Role Creation + if err := managers.RBACManager().AddPolicyRules(f.namespace, f.clusterAgent.serviceAccountName, componentdca.GetClusterAgentRbacResourcesName(f.owner), componentdca.GetDefaultClusterAgentRolePolicyRules(f.owner)); err != nil { + errs = append(errs, err) + } + + // ClusterRole creation + if err := managers.RBACManager().AddClusterPolicyRules(f.namespace, f.clusterAgent.serviceAccountName, componentdca.GetClusterAgentRbacResourcesName(f.owner), componentdca.GetDefaultClusterAgentClusterRolePolicyRules(f.owner)); err != nil { + errs = append(errs, err) + } + } + + return errors.NewAggregate(errs) +} + +func (f *defaultFeature) clusterChecksRunnerDependencies(managers feature.ResourceManagers, component feature.RequiredComponent) error { + _ = component + var errs []error + // serviceAccount + if f.clusterCheckRunner.serviceAccountName != "" { + if err := managers.RBACManager().AddServiceAccount(f.namespace, f.clusterCheckRunner.serviceAccountName); err != nil { + errs = append(errs, err) + } + } + + return errors.NewAggregate(errs) } // ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec // It should do nothing if the feature
doesn't need to configure it. func (f *defaultFeature) ManageClusterAgent(managers feature.PodTemplateManagers) error { + // Add API/APP and Token secret envvar + if f.dcaTokenInfo.token.SecretName != "" { + tokenEnvVar := component.BuildEnvVarFromSource(apicommon.DDClusterAgentAuthToken, component.BuildEnvVarFromSecret(f.dcaTokenInfo.token.SecretName, f.dcaTokenInfo.token.SecretKey)) + managers.EnvVar().AddEnvVar(tokenEnvVar) + } + + if f.credentialsInfo.apiKey.SecretName != "" { + apiKeyEnvVar := component.BuildEnvVarFromSource(apicommon.DDAPIKey, component.BuildEnvVarFromSecret(f.credentialsInfo.apiKey.SecretName, f.credentialsInfo.apiKey.SecretKey)) + managers.EnvVar().AddEnvVar(apiKeyEnvVar) + } + if f.credentialsInfo.appKey.SecretName != "" { + appKeyEnvVar := component.BuildEnvVarFromSource(apicommon.DDAppKey, component.BuildEnvVarFromSecret(f.credentialsInfo.appKey.SecretName, f.credentialsInfo.appKey.SecretKey)) + managers.EnvVar().AddEnvVar(appKeyEnvVar) + } + return nil } @@ -69,3 +330,24 @@ func (f *defaultFeature) ManageNodeAgent(managers feature.PodTemplateManagers) e func (f *defaultFeature) ManageClusterChecksRunner(managers feature.PodTemplateManagers) error { return nil } + +func buildInstallInfoConfigMap(dda metav1.Object) *corev1.ConfigMap { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: component.GetInstallInfoConfigMapName(dda), + Namespace: dda.GetNamespace(), + }, + Data: map[string]string{ + "install_info": fmt.Sprintf(installInfoDataTmpl, version.Version), + }, + } + + return configMap +} + +const installInfoDataTmpl = `--- +install_method: + tool: datadog-operator + tool_version: datadog-operator + installer_version: %s +` diff --git a/controllers/datadogagent/feature/eventcollection/feature.go b/controllers/datadogagent/feature/eventcollection/feature.go index 862076f7a..b56bca42b 100644 --- a/controllers/datadogagent/feature/eventcollection/feature.go +++ b/controllers/datadogagent/feature/eventcollection/feature.go @@ -102,14 +102,14 @@ func (f *eventCollectionFeature) ManageDependencies(managers feature.ResourceMan // hardcoding leader election RBAC for now // can look into separating this out later if this needs to be configurable for other features - leaderElectionResourceName := utils.GetDatadogLeaderElectionResourceName(f.owner.GetName()) + leaderElectionResourceName := utils.GetDatadogLeaderElectionResourceName(f.owner) err := managers.RBACManager().AddClusterPolicyRules("", rbacName, f.serviceAccountName, getLeaderElectionRBACPolicyRules(leaderElectionResourceName)) if err != nil { return err } // event collection RBAC - tokenResourceName := utils.GetDatadogTokenResourceName(f.owner.GetName()) + tokenResourceName := utils.GetDatadogTokenResourceName(f.owner) return managers.RBACManager().AddClusterPolicyRules("", rbacName, f.serviceAccountName, getRBACPolicyRules(tokenResourceName)) } @@ -128,12 +128,12 @@ func (f *eventCollectionFeature) ManageClusterAgent(managers feature.PodTemplate managers.EnvVar().AddEnvVarToContainer(apicommonv1.ClusterAgentContainerName, &corev1.EnvVar{ Name: apicommon.DDLeaderLeaseName, - Value: utils.GetDatadogLeaderElectionResourceName(f.owner.GetName()), + Value: utils.GetDatadogLeaderElectionResourceName(f.owner), }) managers.EnvVar().AddEnvVarToContainer(apicommonv1.ClusterAgentContainerName, &corev1.EnvVar{ Name: apicommon.DDClusterAgentTokenName, - Value: utils.GetDatadogTokenResourceName(f.owner.GetName()), + Value: utils.GetDatadogTokenResourceName(f.owner), }) return nil @@ -154,12 +154,12 
@@ func (f *eventCollectionFeature) ManageNodeAgent(managers feature.PodTemplateMan managers.EnvVar().AddEnvVarToContainer(apicommonv1.CoreAgentContainerName, &corev1.EnvVar{ Name: apicommon.DDLeaderLeaseName, - Value: utils.GetDatadogLeaderElectionResourceName(f.owner.GetName()), + Value: utils.GetDatadogLeaderElectionResourceName(f.owner), }) managers.EnvVar().AddEnvVarToContainer(apicommonv1.CoreAgentContainerName, &corev1.EnvVar{ Name: apicommon.DDClusterAgentTokenName, - Value: utils.GetDatadogTokenResourceName(f.owner.GetName()), + Value: utils.GetDatadogTokenResourceName(f.owner), }) return nil diff --git a/controllers/datadogagent/feature/factory.go b/controllers/datadogagent/feature/factory.go index 34dd16705..966cec175 100644 --- a/controllers/datadogagent/feature/factory.go +++ b/controllers/datadogagent/feature/factory.go @@ -79,7 +79,6 @@ func BuildFeaturesV1(dda *v1alpha1.DatadogAgent, options *Options) ([]Feature, R for _, id := range sortedkeys { feat := featureBuilders[id](options) - options.Logger.Info("test", "feature", id) // only add feat to the output if the feature is enabled config := feat.ConfigureV1(dda) if config.IsEnabled() { diff --git a/controllers/datadogagent/feature/kubernetesstatecore/feature.go b/controllers/datadogagent/feature/kubernetesstatecore/feature.go index b0eb23b29..ae20dbd9c 100644 --- a/controllers/datadogagent/feature/kubernetesstatecore/feature.go +++ b/controllers/datadogagent/feature/kubernetesstatecore/feature.go @@ -60,7 +60,7 @@ func (f *ksmFeature) Configure(dda *v2alpha1.DatadogAgent) feature.RequiredCompo f.owner = dda var output feature.RequiredComponents - if dda.Spec.Features.KubeStateMetricsCore != nil && apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.Enabled) { + if dda.Spec.Features != nil && dda.Spec.Features.KubeStateMetricsCore != nil && apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.Enabled) { output.ClusterAgent.IsRequired = apiutils.NewBoolPointer(true) if dda.Spec.Features.KubeStateMetricsCore.Conf != nil { @@ -125,7 +125,9 @@ func (f *ksmFeature) ManageDependencies(managers feature.ResourceManagers, compo return err } if configCM != nil { - managers.Store().AddOrUpdate(kubernetes.ConfigMapKind, configCM) + if err := managers.Store().AddOrUpdate(kubernetes.ConfigMapKind, configCM); err != nil { + return err + } } // Manager RBAC permission diff --git a/controllers/datadogagent/feature/logcollection/feature.go b/controllers/datadogagent/feature/logcollection/feature.go index 402e4cbc9..4431bd8be 100644 --- a/controllers/datadogagent/feature/logcollection/feature.go +++ b/controllers/datadogagent/feature/logcollection/feature.go @@ -45,6 +45,10 @@ type logCollectionFeature struct { // Configure is used to configure the feature from a v2alpha1.DatadogAgent instance. func (f *logCollectionFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { + if dda.Spec.Features == nil { + return + } + logCollection := dda.Spec.Features.LogCollection if logCollection != nil && apiutils.BoolValue(logCollection.Enabled) { diff --git a/controllers/datadogagent/feature/npm/feature.go b/controllers/datadogagent/feature/npm/feature.go index b99e0fe58..70baca02b 100644 --- a/controllers/datadogagent/feature/npm/feature.go +++ b/controllers/datadogagent/feature/npm/feature.go @@ -35,6 +35,10 @@ type npmFeature struct{} // Configure is used to configure the feature from a v2alpha1.DatadogAgent instance. 
func (f *npmFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { + if dda.Spec.Features == nil { + return + } + if dda.Spec.Features.NPM != nil && apiutils.BoolValue(dda.Spec.Features.NPM.Enabled) { reqComp = feature.RequiredComponents{ Agent: feature.RequiredComponent{ @@ -128,14 +132,14 @@ func (f *npmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error // env vars enableEnvVar := &corev1.EnvVar{ - Name: apicommon.DDSystemProbeNPMEnabledEnvVar, + Name: apicommon.DDSystemProbeNPMEnabled, Value: "true", } managers.EnvVar().AddEnvVarToContainer(apicommonv1.ProcessAgentContainerName, enableEnvVar) managers.EnvVar().AddEnvVarToContainer(apicommonv1.SystemProbeContainerName, enableEnvVar) sysProbeEnableEnvVar := &corev1.EnvVar{ - Name: apicommon.DDSystemProbeEnabledEnvVar, + Name: apicommon.DDSystemProbeEnabled, Value: "true", } managers.EnvVar().AddEnvVarToContainer(apicommonv1.ProcessAgentContainerName, sysProbeEnableEnvVar) diff --git a/controllers/datadogagent/feature/npm/feature_test.go b/controllers/datadogagent/feature/npm/feature_test.go index 172abb44b..4946cf0b8 100644 --- a/controllers/datadogagent/feature/npm/feature_test.go +++ b/controllers/datadogagent/feature/npm/feature_test.go @@ -159,11 +159,11 @@ func Test_npmFeature_Configure(t *testing.T) { // check env vars sysProbeWantEnvVars := []*corev1.EnvVar{ { - Name: apicommon.DDSystemProbeNPMEnabledEnvVar, + Name: apicommon.DDSystemProbeNPMEnabled, Value: "true", }, { - Name: apicommon.DDSystemProbeEnabledEnvVar, + Name: apicommon.DDSystemProbeEnabled, Value: "true", }, { @@ -185,7 +185,6 @@ func Test_npmFeature_Configure(t *testing.T) { processAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ProcessAgentContainerName] assert.True(t, apiutils.IsEqualStruct(processAgentEnvVars, processWantEnvVars), "Process Agent envvars \ndiff = %s", cmp.Diff(processAgentEnvVars, processWantEnvVars)) - } tests := test.FeatureTestSuite{ diff --git a/controllers/datadogagent/feature/oom_kill/feature.go b/controllers/datadogagent/feature/oom_kill/feature.go index 82e36bfc5..2e711caf2 100644 --- a/controllers/datadogagent/feature/oom_kill/feature.go +++ b/controllers/datadogagent/feature/oom_kill/feature.go @@ -35,7 +35,7 @@ type oomKillFeature struct{} // Configure is used to configure the feature from a v2alpha1.DatadogAgent instance. func (f *oomKillFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { - if dda.Spec.Features.OOMKill != nil && apiutils.BoolValue(dda.Spec.Features.OOMKill.Enabled) { + if dda.Spec.Features != nil && dda.Spec.Features.OOMKill != nil && apiutils.BoolValue(dda.Spec.Features.OOMKill.Enabled) { reqComp.Agent = feature.RequiredComponent{ IsRequired: apiutils.NewBoolPointer(true), Containers: []apicommonv1.AgentContainerName{apicommonv1.CoreAgentContainerName, apicommonv1.SystemProbeContainerName}, diff --git a/controllers/datadogagent/feature/prometheus_scrape/feature.go b/controllers/datadogagent/feature/prometheus_scrape/feature.go index 6d0c755de..d7f96b2f3 100644 --- a/controllers/datadogagent/feature/prometheus_scrape/feature.go +++ b/controllers/datadogagent/feature/prometheus_scrape/feature.go @@ -39,6 +39,10 @@ type prometheusScrapeFeature struct { // Configure is used to configure the feature from a v2alpha1.DatadogAgent instance. 
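Editor-added sketch, not part of this patch, of the nil-guard pattern this change adds to the feature Configure methods; the `example` package and `npmEnabled` helper are hypothetical.

package example // hypothetical package, for illustration only

import (
	"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
	apiutils "github.com/DataDog/datadog-operator/apis/utils"
)

// npmEnabled checks Spec.Features for nil before dereferencing the NPM
// sub-config, mirroring the guard added to each feature's Configure method.
func npmEnabled(dda *v2alpha1.DatadogAgent) bool {
	if dda.Spec.Features == nil || dda.Spec.Features.NPM == nil {
		return false
	}
	return apiutils.BoolValue(dda.Spec.Features.NPM.Enabled)
}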
func (f *prometheusScrapeFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { + if dda.Spec.Features == nil { + return + } + prometheusScrape := dda.Spec.Features.PrometheusScrape if prometheusScrape != nil && apiutils.BoolValue(prometheusScrape.Enabled) { diff --git a/controllers/datadogagent/feature/tcp_queue_length/feature.go b/controllers/datadogagent/feature/tcp_queue_length/feature.go index e283d11c0..ba6afd211 100644 --- a/controllers/datadogagent/feature/tcp_queue_length/feature.go +++ b/controllers/datadogagent/feature/tcp_queue_length/feature.go @@ -35,6 +35,9 @@ type tcpQueueLengthFeature struct{} // Configure is used to configure the feature from a v2alpha1.DatadogAgent instance. func (f *tcpQueueLengthFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { + if dda.Spec.Features == nil { + return + } if dda.Spec.Features.TCPQueueLength != nil && apiutils.BoolValue(dda.Spec.Features.TCPQueueLength.Enabled) { reqComp.Agent = feature.RequiredComponent{ IsRequired: apiutils.NewBoolPointer(true), diff --git a/controllers/datadogagent/feature/test/testsuite.go b/controllers/datadogagent/feature/test/testsuite.go index 065d0576a..2f338a2c1 100644 --- a/controllers/datadogagent/feature/test/testsuite.go +++ b/controllers/datadogagent/feature/test/testsuite.go @@ -6,6 +6,17 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + policyv1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + edsdatadoghqv1alpha1 "github.com/DataDog/extendeddaemonset/api/v1alpha1" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" "github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies" @@ -57,6 +68,33 @@ func (suite FeatureTestSuite) Run(t *testing.T, buildFunc feature.BuildFunc) { } } +// testScheme return a runtime.Scheme for testing purpose +func testScheme(isV2 bool) *runtime.Scheme { + s := runtime.NewScheme() + s.AddKnownTypes(edsdatadoghqv1alpha1.GroupVersion, &edsdatadoghqv1alpha1.ExtendedDaemonSet{}) + s.AddKnownTypes(appsv1.SchemeGroupVersion, &appsv1.DaemonSet{}) + s.AddKnownTypes(appsv1.SchemeGroupVersion, &appsv1.Deployment{}) + s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Secret{}) + s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.ServiceAccount{}) + s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.ConfigMap{}) + s.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.ClusterRoleBinding{}) + s.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.ClusterRole{}) + s.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.Role{}) + s.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.RoleBinding{}) + s.AddKnownTypes(policyv1.SchemeGroupVersion, &policyv1.PodDisruptionBudget{}) + s.AddKnownTypes(apiregistrationv1.SchemeGroupVersion, &apiregistrationv1.APIServiceList{}) + s.AddKnownTypes(apiregistrationv1.SchemeGroupVersion, &apiregistrationv1.APIService{}) + s.AddKnownTypes(networkingv1.SchemeGroupVersion, &networkingv1.NetworkPolicy{}) + + if isV2 { + s.AddKnownTypes(v2alpha1.GroupVersion, &v2alpha1.DatadogAgent{}) + } else { + s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.DatadogAgent{}) + } + + return s +} + func runTest(t 
*testing.T, tt FeatureTest, buildFunc feature.BuildFunc) { logf.SetLogger(zap.New(zap.UseDevMode(true))) logger := logf.Log.WithName(tt.Name) @@ -67,10 +105,15 @@ func runTest(t *testing.T, tt FeatureTest, buildFunc feature.BuildFunc) { // check feature Configure function var gotConfigure feature.RequiredComponents + var dda metav1.Object + var isV2 bool if tt.DDAv2 != nil { gotConfigure = f.Configure(tt.DDAv2) + dda = tt.DDAv2 + isV2 = true } else if tt.DDAv1 != nil { gotConfigure = f.ConfigureV1(tt.DDAv1) + dda = tt.DDAv1 } else { t.Fatal("No DatadogAgent CRD provided") } @@ -84,8 +127,17 @@ func runTest(t *testing.T, tt FeatureTest, buildFunc feature.BuildFunc) { return } + if tt.StoreOption == nil { + tt.StoreOption = &dependencies.StoreOptions{ + Logger: logger, + } + } + if tt.StoreOption.Scheme == nil { + tt.StoreOption.Scheme = testScheme(isV2) + } + // dependencies - store := dependencies.NewStore(tt.StoreOption) + store := dependencies.NewStore(dda, tt.StoreOption) if tt.StoreInitFunc != nil { tt.StoreInitFunc(store) } diff --git a/controllers/datadogagent/feature/usm/feature.go b/controllers/datadogagent/feature/usm/feature.go index c5d3bab1a..72e1bd321 100644 --- a/controllers/datadogagent/feature/usm/feature.go +++ b/controllers/datadogagent/feature/usm/feature.go @@ -35,7 +35,7 @@ type usmFeature struct{} // Configure is used to configure the feature from a v2alpha1.DatadogAgent instance. func (f *usmFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) { - if dda.Spec.Features.USM != nil && apiutils.BoolValue(dda.Spec.Features.USM.Enabled) { + if dda.Spec.Features != nil && dda.Spec.Features.USM != nil && apiutils.BoolValue(dda.Spec.Features.USM.Enabled) { reqComp = feature.RequiredComponents{ Agent: feature.RequiredComponent{ IsRequired: apiutils.NewBoolPointer(true), diff --git a/controllers/datadogagent/merger/rbac.go b/controllers/datadogagent/merger/rbac.go index a2486bd0e..8af12158e 100644 --- a/controllers/datadogagent/merger/rbac.go +++ b/controllers/datadogagent/merger/rbac.go @@ -12,11 +12,13 @@ import ( "github.com/DataDog/datadog-operator/pkg/kubernetes" "github.com/DataDog/datadog-operator/pkg/kubernetes/rbac" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" ) // RBACManager use to manage RBAC resources. type RBACManager interface { + AddServiceAccount(namespace string, name string) error AddPolicyRules(namespace string, roleName string, saName string, policies []rbacv1.PolicyRule) error AddClusterPolicyRules(namespace string, roleName string, saName string, policies []rbacv1.PolicyRule) error } @@ -34,6 +36,17 @@ type rbacManagerImpl struct { store dependencies.StoreClient } +// AddServiceAccount use to create a ServiceAccount +func (m *rbacManagerImpl) AddServiceAccount(namespace string, name string) error { + obj, _ := m.store.GetOrCreate(kubernetes.ServiceAccountsKind, namespace, name) + sa, ok := obj.(*corev1.ServiceAccount) + if !ok { + return fmt.Errorf("unable to get from the store the ServiceAccount %s/%s", namespace, name) + } + + return m.store.AddOrUpdate(kubernetes.ServiceAccountsKind, sa) +} + // AddPolicyRules use to add PolicyRules to a Role. It also create the RoleBinding. 
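Editor-added usage sketch, not part of this patch, of the RBACManager additions: create the ServiceAccount first, then attach namespaced rules (AddPolicyRules also creates the RoleBinding, which now references a Role rather than a ClusterRole). The `example` package and `setupRBAC` function are hypothetical.

package example // hypothetical package, for illustration only

import (
	rbacv1 "k8s.io/api/rbac/v1"

	"github.com/DataDog/datadog-operator/controllers/datadogagent/merger"
	"github.com/DataDog/datadog-operator/pkg/kubernetes/rbac"
)

// setupRBAC creates the ServiceAccount, then binds a minimal set of rules to it.
func setupRBAC(m merger.RBACManager, namespace, roleName, saName string) error {
	if err := m.AddServiceAccount(namespace, saName); err != nil {
		return err
	}
	rules := []rbacv1.PolicyRule{
		{
			APIGroups: []string{rbac.CoreAPIGroup},
			Resources: []string{rbac.ConfigMapsResource},
			Verbs:     []string{rbac.GetVerb, rbac.ListVerb},
		},
	}
	return m.AddPolicyRules(namespace, roleName, saName, rules)
}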
func (m *rbacManagerImpl) AddPolicyRules(namespace string, roleName string, saName string, policies []rbacv1.PolicyRule) error { obj, _ := m.store.GetOrCreate(kubernetes.RolesKind, namespace, roleName) @@ -44,7 +57,9 @@ func (m *rbacManagerImpl) AddPolicyRules(namespace string, roleName string, saNa // TODO: can be improve by checking if the policies don't already existe. role.Rules = append(role.Rules, policies...) - m.store.AddOrUpdate(kubernetes.RolesKind, role) + if err := m.store.AddOrUpdate(kubernetes.RolesKind, role); err != nil { + return err + } bindingObj, _ := m.store.GetOrCreate(kubernetes.RoleBindingKind, namespace, roleName) roleBinding, ok := bindingObj.(*rbacv1.RoleBinding) @@ -54,7 +69,7 @@ func (m *rbacManagerImpl) AddPolicyRules(namespace string, roleName string, saNa roleBinding.RoleRef = rbacv1.RoleRef{ APIGroup: rbac.RbacAPIGroup, - Kind: rbac.ClusterRoleKind, + Kind: rbac.RoleKind, Name: roleName, } found := false @@ -71,7 +86,9 @@ func (m *rbacManagerImpl) AddPolicyRules(namespace string, roleName string, saNa Namespace: namespace, }) } - m.store.AddOrUpdate(kubernetes.RoleBindingKind, roleBinding) + if err := m.store.AddOrUpdate(kubernetes.RoleBindingKind, roleBinding); err != nil { + return err + } return nil } @@ -86,7 +103,9 @@ func (m *rbacManagerImpl) AddClusterPolicyRules(namespace string, roleName strin // TODO: can be improve by checking if the policies don't already existe. clusterRole.Rules = append(clusterRole.Rules, policies...) - m.store.AddOrUpdate(kubernetes.ClusterRolesKind, clusterRole) + if err := m.store.AddOrUpdate(kubernetes.ClusterRolesKind, clusterRole); err != nil { + return err + } bindingObj, _ := m.store.GetOrCreate(kubernetes.ClusterRoleBindingKind, "", roleName) clusterRoleBinding, ok := bindingObj.(*rbacv1.ClusterRoleBinding) @@ -113,7 +132,9 @@ func (m *rbacManagerImpl) AddClusterPolicyRules(namespace string, roleName strin Namespace: namespace, }) } - m.store.AddOrUpdate(kubernetes.ClusterRoleBindingKind, clusterRoleBinding) + if err := m.store.AddOrUpdate(kubernetes.ClusterRoleBindingKind, clusterRoleBinding); err != nil { + return err + } return nil } diff --git a/controllers/datadogagent/merger/rbac_test.go b/controllers/datadogagent/merger/rbac_test.go index 40eb9b3a1..8291e99bc 100644 --- a/controllers/datadogagent/merger/rbac_test.go +++ b/controllers/datadogagent/merger/rbac_test.go @@ -8,10 +8,13 @@ package merger import ( "testing" + "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" "github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies" "github.com/DataDog/datadog-operator/pkg/kubernetes" + rbacv1 "k8s.io/api/rbac/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) func TestRBACManager_AddPolicyRules(t *testing.T) { @@ -54,6 +57,19 @@ func TestRBACManager_AddPolicyRules(t *testing.T) { }, } + testScheme := runtime.NewScheme() + testScheme.AddKnownTypes(v2alpha1.GroupVersion, &v2alpha1.DatadogAgent{}) + storeOptions := &dependencies.StoreOptions{ + Scheme: testScheme, + } + + owner := &v2alpha1.DatadogAgent{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + } + type args struct { namespace string roleName string @@ -69,7 +85,7 @@ func TestRBACManager_AddPolicyRules(t *testing.T) { }{ { name: "empty store", - store: dependencies.NewStore(nil), + store: dependencies.NewStore(owner, storeOptions), args: args{ namespace: ns, saName: name + "sa", @@ -91,7 +107,7 @@ func TestRBACManager_AddPolicyRules(t *testing.T) { }, { name: "another Role already 
exist", - store: dependencies.NewStore(nil).AddOrUpdateStore(kubernetes.RolesKind, role1), + store: dependencies.NewStore(owner, storeOptions).AddOrUpdateStore(kubernetes.RolesKind, role1), args: args{ namespace: ns, saName: name + "sa", @@ -113,7 +129,7 @@ func TestRBACManager_AddPolicyRules(t *testing.T) { }, { name: "update existing Role", - store: dependencies.NewStore(nil).AddOrUpdateStore(kubernetes.RolesKind, role2), + store: dependencies.NewStore(owner, storeOptions).AddOrUpdateStore(kubernetes.RolesKind, role2), args: args{ namespace: ns, saName: name + "sa", @@ -174,6 +190,20 @@ func TestRBACManager_AddClusterPolicyRules(t *testing.T) { rule1, }, } + + testScheme := runtime.NewScheme() + testScheme.AddKnownTypes(v2alpha1.GroupVersion, &v2alpha1.DatadogAgent{}) + storeOptions := &dependencies.StoreOptions{ + Scheme: testScheme, + } + + owner := &v2alpha1.DatadogAgent{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + } + type fields struct { store dependencies.StoreClient } @@ -192,7 +222,7 @@ func TestRBACManager_AddClusterPolicyRules(t *testing.T) { }{ { name: "empty store", - store: dependencies.NewStore(nil), + store: dependencies.NewStore(owner, storeOptions), args: args{ namespace: ns, saName: name + "sa", @@ -214,7 +244,7 @@ func TestRBACManager_AddClusterPolicyRules(t *testing.T) { }, { name: "another ClusterRole already exist", - store: dependencies.NewStore(nil).AddOrUpdateStore(kubernetes.RolesKind, role1), + store: dependencies.NewStore(owner, storeOptions).AddOrUpdateStore(kubernetes.RolesKind, role1), args: args{ namespace: ns, saName: name + "sa", diff --git a/controllers/datadogagent/merger/secret.go b/controllers/datadogagent/merger/secret.go index 845a0ed36..35fc1a98c 100644 --- a/controllers/datadogagent/merger/secret.go +++ b/controllers/datadogagent/merger/secret.go @@ -43,6 +43,5 @@ func (m *secretManagerImpl) AddSecret(secretNamespace, secretName, key, value st } secret.Data[key] = []byte(value) - m.store.AddOrUpdate(kubernetes.SecretsKind, secret) - return nil + return m.store.AddOrUpdate(kubernetes.SecretsKind, secret) } diff --git a/controllers/datadogagent/merger/secret_test.go b/controllers/datadogagent/merger/secret_test.go index a82709078..f45886d47 100644 --- a/controllers/datadogagent/merger/secret_test.go +++ b/controllers/datadogagent/merger/secret_test.go @@ -8,16 +8,32 @@ package merger import ( "testing" + "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" "github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies" "github.com/DataDog/datadog-operator/pkg/kubernetes" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) func Test_secretManagerImpl_AddSecret(t *testing.T) { secretNs := "foo" secretName := "bar" + owner := &v2alpha1.DatadogAgent{ + ObjectMeta: v1.ObjectMeta{ + Namespace: secretNs, + Name: secretName, + }, + } + + testScheme := runtime.NewScheme() + testScheme.AddKnownTypes(v2alpha1.GroupVersion, &v2alpha1.DatadogAgent{}) + storeOptions := &dependencies.StoreOptions{ + Scheme: testScheme, + } + secret1 := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -43,7 +59,7 @@ func Test_secretManagerImpl_AddSecret(t *testing.T) { }{ { name: "empty Store", - store: dependencies.NewStore(nil), + store: dependencies.NewStore(owner, storeOptions), args: args{ secretNamespace: secretNs, secretName: secretName, @@ -59,7 +75,7 @@ func Test_secretManagerImpl_AddSecret(t *testing.T) { 
}, { name: "secret already exist", - store: dependencies.NewStore(nil).AddOrUpdateStore(kubernetes.SecretsKind, secret1), + store: dependencies.NewStore(owner, storeOptions).AddOrUpdateStore(kubernetes.SecretsKind, secret1), args: args{ secretNamespace: secretNs, secretName: secretName, @@ -69,9 +85,12 @@ func Test_secretManagerImpl_AddSecret(t *testing.T) { wantErr: false, validateFunc: func(t *testing.T, store *dependencies.Store) { obj, found := store.Get(kubernetes.SecretsKind, secretNs, secretName) - secret, _ := obj.(*corev1.Secret) + secret, ok := obj.(*corev1.Secret) + if !ok { + t.Fatalf("unable to cast the obj to a Secret %s/%s", secretNs, secretName) + } if !found { - t.Errorf("missing Secret %s/%s", secretNs, secretName) + t.Fatalf("missing Secret %s/%s", secretNs, secretName) } if _, ok := secret.Data["key1"]; !ok { t.Errorf("default key1 not found in Secret %s/%s", secretNs, secretName) diff --git a/controllers/datadogagent/object/owner_ref.go b/controllers/datadogagent/object/owner_ref.go new file mode 100644 index 000000000..19e2063fb --- /dev/null +++ b/controllers/datadogagent/object/owner_ref.go @@ -0,0 +1,77 @@ +package object + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// CheckOwnerReference return true if owner is the owner of the object +func CheckOwnerReference(owner, object metav1.Object) bool { + return metav1.IsControlledBy(object, owner) +} + +// SetOwnerReference sets owner as a OwnerReference. +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + + // Create a new ref + ref := *newOwnerRef(owner, schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind}) + + existingRefs := object.GetOwnerReferences() + fi := -1 + for i, r := range existingRefs { + if referSameObject(ref, r) { + fi = i + } + } + if fi == -1 { + existingRefs = append(existingRefs, ref) + } else { + existingRefs[fi] = ref + } + + // Update owner references + object.SetOwnerReferences(existingRefs) + return nil +} + +// newOwnerRef creates an OwnerReference pointing to the given owner. 
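Editor-added sketch, not part of this patch, of the new owner-reference helpers in the object package; the `example` package and `ownConfigMap` function are hypothetical.

package example // hypothetical package, for illustration only

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
	"github.com/DataDog/datadog-operator/controllers/datadogagent/object"
)

// ownConfigMap sets the DatadogAgent as controller of the ConfigMap, then
// verifies the reference with CheckOwnerReference.
func ownConfigMap(dda *v2alpha1.DatadogAgent, cm *corev1.ConfigMap, scheme *runtime.Scheme) (bool, error) {
	if err := object.SetOwnerReference(dda, cm, scheme); err != nil {
		return false, err
	}
	return object.CheckOwnerReference(dda, cm), nil
}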
+func newOwnerRef(owner metav1.Object, gvk schema.GroupVersionKind) *metav1.OwnerReference { + blockOwnerDeletion := true + isController := true + return &metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} + +// Returns true if a and b point to the same object +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name +} diff --git a/controllers/datadogagent/secret_agent.go b/controllers/datadogagent/secret_agent.go index 275e37d91..8b6218863 100644 --- a/controllers/datadogagent/secret_agent.go +++ b/controllers/datadogagent/secret_agent.go @@ -15,11 +15,10 @@ import ( datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" "github.com/DataDog/datadog-operator/controllers/datadogagent/object" "github.com/DataDog/datadog-operator/pkg/config" - "github.com/DataDog/datadog-operator/pkg/controller/utils" ) func (r *Reconciler) manageAgentSecret(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) { - return r.manageSecret(logger, managedSecret{name: utils.GetDefaultCredentialsSecretName(dda), requireFunc: needAgentSecret, createFunc: newAgentSecret}, dda) + return r.manageSecret(logger, managedSecret{name: datadoghqv1alpha1.GetDefaultCredentialsSecretName(dda), requireFunc: needAgentSecret, createFunc: newAgentSecret}, dda) } func newAgentSecret(name string, dda *datadoghqv1alpha1.DatadogAgent) *corev1.Secret { diff --git a/controllers/datadogagent/secret_clusteragent.go b/controllers/datadogagent/secret_clusteragent.go index 1491d0c4a..f78f130bd 100644 --- a/controllers/datadogagent/secret_clusteragent.go +++ b/controllers/datadogagent/secret_clusteragent.go @@ -52,8 +52,8 @@ func needExternalMetricsSecret(dda *datadoghqv1alpha1.DatadogAgent) bool { } // If API key and app key don't need a new secret, then don't create one. 
- if datadoghqv1alpha1.CheckAPIKeySufficiency(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, datadoghqv1alpha1.DDExternalMetricsProviderAPIKey) && - datadoghqv1alpha1.CheckAppKeySufficiency(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, datadoghqv1alpha1.DDExternalMetricsProviderAPIKey) { + if datadoghqv1alpha1.CheckAPIKeySufficiency(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, apicommon.DDExternalMetricsProviderAPIKey) && + datadoghqv1alpha1.CheckAppKeySufficiency(dda.Spec.ClusterAgent.Config.ExternalMetrics.Credentials, apicommon.DDExternalMetricsProviderAPIKey) { return false } diff --git a/controllers/datadogagent/service.go b/controllers/datadogagent/service.go index b3d89187c..2fcde6cd1 100644 --- a/controllers/datadogagent/service.go +++ b/controllers/datadogagent/service.go @@ -22,7 +22,7 @@ import ( apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" - "github.com/DataDog/datadog-operator/controllers/datadogagent/component" + componentdca "github.com/DataDog/datadog-operator/controllers/datadogagent/component/clusteragent" "github.com/DataDog/datadog-operator/controllers/datadogagent/object" "github.com/DataDog/datadog-operator/pkg/controller/utils/comparison" "github.com/DataDog/datadog-operator/pkg/controller/utils/datadog" @@ -36,7 +36,7 @@ func (r *Reconciler) manageClusterAgentService(logger logr.Logger, dda *datadogh return r.cleanupClusterAgentService(dda) } - serviceName := component.GetClusterAgentServiceName(dda) + serviceName := componentdca.GetClusterAgentServiceName(dda) service := &corev1.Service{} err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: dda.Namespace, Name: serviceName}, service) if err != nil { @@ -60,7 +60,7 @@ func (r *Reconciler) updateIfNeededClusterAgentService(logger logr.Logger, dda * } func (r *Reconciler) cleanupClusterAgentService(dda *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) { - serviceName := component.GetClusterAgentServiceName(dda) + serviceName := componentdca.GetClusterAgentServiceName(dda) return cleanupService(r.client, serviceName, dda.Namespace, dda) } @@ -70,7 +70,7 @@ func newClusterAgentService(dda *datadoghqv1alpha1.DatadogAgent) *corev1.Service service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: component.GetClusterAgentServiceName(dda), + Name: componentdca.GetClusterAgentServiceName(dda), Namespace: dda.Namespace, Labels: labels, Annotations: annotations, diff --git a/controllers/datadogagent/utils.go b/controllers/datadogagent/utils.go index 2c620c786..f928f334b 100644 --- a/controllers/datadogagent/utils.go +++ b/controllers/datadogagent/utils.go @@ -312,7 +312,7 @@ func getAgentContainer(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, if agentSpec.HostNetwork { udpPort.ContainerPort = *agentSpec.Config.HostPort envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDDogstatsdPort, + Name: apicommon.DDDogstatsdPort, Value: strconv.Itoa(int(*agentSpec.Config.HostPort)), }) } @@ -575,7 +575,7 @@ func getConfigInitContainers(spec *datadoghqv1alpha1.DatadogAgentSpec, volumeMou func getEnvVarDogstatsdSocket(dda *datadoghqv1alpha1.DatadogAgent) corev1.EnvVar { return corev1.EnvVar{ - Name: datadoghqv1alpha1.DDDogstatsdSocket, + Name: apicommon.DDDogstatsdSocket, Value: getLocalFilepath(*dda.Spec.Agent.Config.Dogstatsd.UnixDomainSocket.HostFilepath, localDogstatsdSocketPath), } } @@ -593,7 +593,7 @@ func getEnvVarsForAPMAgent(dda 
*datadoghqv1alpha1.DatadogAgent) ([]corev1.EnvVar // APM Unix Domain Socket configuration if apiutils.BoolValue(dda.Spec.Agent.Apm.UnixDomainSocket.Enabled) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDPPMReceiverSocket, + Name: apicommon.DDPPMReceiverSocket, Value: getLocalFilepath(*dda.Spec.Agent.Apm.UnixDomainSocket.HostFilepath, localAPMSocketPath), }) } @@ -611,7 +611,7 @@ func getEnvVarsForAPMAgent(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.EnvVar func getEnvVarsForProcessAgent(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.EnvVar, error) { envVars := []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDSystemProbeAgentEnabled, + Name: apicommon.DDSystemProbeAgentEnabled, Value: strconv.FormatBool(isSystemProbeEnabled(&dda.Spec)), }, getEnvVarDogstatsdSocket(dda), @@ -619,11 +619,11 @@ func getEnvVarsForProcessAgent(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.En if isSystemProbeEnabled(&dda.Spec) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDSystemProbeSocketPath, + Name: apicommon.DDSystemProbeSocket, Value: filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock"), }) - envVars = addBoolEnVar(isNetworkMonitoringEnabled(&dda.Spec), datadoghqv1alpha1.DDSystemProbeNPMEnabled, envVars) + envVars = addBoolEnVar(isNetworkMonitoringEnabled(&dda.Spec), apicommon.DDSystemProbeNPMEnabled, envVars) } if processCollectionEnabled(dda) { @@ -664,39 +664,39 @@ func getEnvVarsForSystemProbe(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.Env envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDSystemProbeDebugPort, + Name: apicommon.DDSystemProbeDebugPort, Value: strconv.FormatInt(int64(dda.Spec.Agent.SystemProbe.DebugPort), 10), }, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDSystemProbeSocketPath, + Name: apicommon.DDSystemProbeSocket, Value: filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock"), }, ) // We do not set env vars to false if *bool is nil as it will override content from config file - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.ConntrackEnabled, datadoghqv1alpha1.DDSystemProbeConntrackEnabled, envVars) - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.BPFDebugEnabled, datadoghqv1alpha1.DDSystemProbeBPFDebugEnabled, envVars) - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableTCPQueueLength, datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, envVars) - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableOOMKill, datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, envVars) - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.CollectDNSStats, datadoghqv1alpha1.DDSystemProbeCollectDNSStatsEnabled, envVars) - envVars = addBoolEnVar(isNetworkMonitoringEnabled(&dda.Spec), datadoghqv1alpha1.DDSystemProbeNPMEnabled, envVars) - envVars = addBoolEnVar(isRuntimeSecurityEnabled(&dda.Spec), datadoghqv1alpha1.DDRuntimeSecurityConfigEnabled, envVars) - envVars = addBoolEnVar(isSyscallMonitorEnabled(&dda.Spec), datadoghqv1alpha1.DDRuntimeSecurityConfigSyscallMonitorEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.ConntrackEnabled, apicommon.DDSystemProbeConntrackEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.BPFDebugEnabled, apicommon.DDSystemProbeBPFDebugEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableTCPQueueLength, apicommon.DDSystemProbeTCPQueueLengthEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableOOMKill, 
apicommon.DDSystemProbeOOMKillEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.CollectDNSStats, apicommon.DDSystemProbeCollectDNSStatsEnabled, envVars) + envVars = addBoolEnVar(isNetworkMonitoringEnabled(&dda.Spec), apicommon.DDSystemProbeNPMEnabled, envVars) + envVars = addBoolEnVar(isRuntimeSecurityEnabled(&dda.Spec), apicommon.DDRuntimeSecurityConfigEnabled, envVars) + envVars = addBoolEnVar(isSyscallMonitorEnabled(&dda.Spec), apicommon.DDRuntimeSecurityConfigSyscallMonitorEnabled, envVars) // For now don't expose the remote_tagger setting to user, since it is an implementation detail. - envVars = addBoolEnVar(isRuntimeSecurityEnabled(&dda.Spec), datadoghqv1alpha1.DDRuntimeSecurityConfigRemoteTaggerEnabled, envVars) + envVars = addBoolEnVar(isRuntimeSecurityEnabled(&dda.Spec), apicommon.DDRuntimeSecurityConfigRemoteTaggerEnabled, envVars) if isRuntimeSecurityEnabled(&dda.Spec) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigSocket, + Name: apicommon.DDRuntimeSecurityConfigSocket, Value: filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "runtime-security.sock"), }, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigPoliciesDir, + Name: apicommon.DDRuntimeSecurityConfigPoliciesDir, Value: datadoghqv1alpha1.SecurityAgentRuntimePoliciesDirVolumePath, }, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAuthTokenFilePath, + Name: apicommon.DDAuthTokenFilePath, Value: filepath.Join(apicommon.AuthVolumePath, "token"), }, ) @@ -709,7 +709,7 @@ func getEnvVarsForSystemProbe(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.Env func getEnvVarsCommon(dda *datadoghqv1alpha1.DatadogAgent, needAPIKey bool) ([]corev1.EnvVar, error) { envVars := []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDLogLevel, + Name: apicommon.DDLogLevel, Value: getLogLevel(dda), }, { @@ -722,14 +722,14 @@ func getEnvVarsCommon(dda *datadoghqv1alpha1.DatadogAgent, needAPIKey bool) ([]c if dda.Spec.ClusterName != "" { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterName, + Name: apicommon.DDClusterName, Value: dda.Spec.ClusterName, }) } if needAPIKey { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDAPIKey, + Name: apicommon.DDAPIKey, ValueFrom: getAPIKeyFromSecret(dda), }) } @@ -741,27 +741,27 @@ func getEnvVarsCommon(dda *datadoghqv1alpha1.DatadogAgent, needAPIKey bool) ([]c } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDTags, + Name: apicommon.DDTags, Value: string(tags), }) } if dda.Spec.Agent.Config.DDUrl != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDddURL, + Name: apicommon.DDddURL, Value: *dda.Spec.Agent.Config.DDUrl, }) } if dda.Spec.Agent.Config.CriSocket != nil { if dda.Spec.Agent.Config.CriSocket.CriSocketPath != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDCriSocketPath, + Name: apicommon.DDCriSocketPath, Value: filepath.Join(datadoghqv1alpha1.HostCriSocketPathPrefix, *dda.Spec.Agent.Config.CriSocket.CriSocketPath), }) } if dda.Spec.Agent.Config.CriSocket.DockerSocketPath != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DockerHost, + Name: apicommon.DockerHost, Value: "unix://" + filepath.Join(datadoghqv1alpha1.HostCriSocketPathPrefix, *dda.Spec.Agent.Config.CriSocket.DockerSocketPath), }) } @@ -771,7 +771,7 @@ func getEnvVarsCommon(dda *datadoghqv1alpha1.DatadogAgent, needAPIKey bool) ([]c if dda.Spec.Site != "" { envVars = append(envVars, corev1.EnvVar{ - Name: 
datadoghqv1alpha1.DDSite, + Name: apicommon.DDSite, Value: dda.Spec.Site, }) } @@ -786,21 +786,21 @@ func getEnvVarsForLogCollection(logSpec *datadoghqv1alpha1.LogCollectionConfig) envVars := []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDLogsEnabled, + Name: apicommon.DDLogsEnabled, Value: strconv.FormatBool(apiutils.BoolValue(logSpec.Enabled)), }, { - Name: datadoghqv1alpha1.DDLogsConfigContainerCollectAll, + Name: apicommon.DDLogsConfigContainerCollectAll, Value: strconv.FormatBool(apiutils.BoolValue(logSpec.LogsConfigContainerCollectAll)), }, { - Name: datadoghqv1alpha1.DDLogsContainerCollectUsingFiles, + Name: apicommon.DDLogsContainerCollectUsingFiles, Value: strconv.FormatBool(apiutils.BoolValue(logSpec.ContainerCollectUsingFiles)), }, } if logSpec.OpenFilesLimit != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDLogsConfigOpenFilesLimit, + Name: apicommon.DDLogsConfigOpenFilesLimit, Value: strconv.FormatInt(int64(*logSpec.OpenFilesLimit), 10), }) } @@ -819,7 +819,7 @@ func getEnvVarsForMetadataAsTags(agentConfig *datadoghqv1alpha1.NodeAgentConfig) return nil, err } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDNodeLabelsAsTags, + Name: apicommon.DDNodeLabelsAsTags, Value: string(nodeLabelsAsTags), }) } @@ -830,7 +830,7 @@ func getEnvVarsForMetadataAsTags(agentConfig *datadoghqv1alpha1.NodeAgentConfig) return nil, err } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDPodLabelsAsTags, + Name: apicommon.DDPodLabelsAsTags, Value: string(podLabelsAsTags), }) } @@ -841,7 +841,7 @@ func getEnvVarsForMetadataAsTags(agentConfig *datadoghqv1alpha1.NodeAgentConfig) return nil, err } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDPodAnnotationsAsTags, + Name: apicommon.DDPodAnnotationsAsTags, Value: string(podAnnotationsAsTags), }) } @@ -852,7 +852,7 @@ func getEnvVarsForMetadataAsTags(agentConfig *datadoghqv1alpha1.NodeAgentConfig) return nil, err } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDNamespaceLabelsAsTags, + Name: apicommon.DDNamespaceLabelsAsTags, Value: string(namespaceLabelsAsTags), }) } @@ -873,7 +873,7 @@ func getEnvVarsForAgent(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) Value: strconv.Itoa(int(*spec.Agent.Config.HealthPort)), }, { - Name: datadoghqv1alpha1.DDCollectKubeEvents, + Name: apicommon.DDCollectKubernetesEvents, Value: strconv.FormatBool(*spec.Agent.Config.CollectEvents), }, { @@ -881,8 +881,8 @@ func getEnvVarsForAgent(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) Value: strconv.FormatBool(*spec.Agent.Config.LeaderElection), }, { - Name: datadoghqv1alpha1.DDLeaderLeaseName, - Value: utils.GetDatadogLeaderElectionResourceName(dda.Name), + Name: apicommon.DDLeaderLeaseName, + Value: utils.GetDatadogLeaderElectionResourceName(dda), }, } metadataAsTagsEnv, err := getEnvVarsForMetadataAsTags(spec.Agent.Config) @@ -901,7 +901,7 @@ func getEnvVarsForAgent(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) if isDogstatsdConfigured(&spec) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDDogstatsdOriginDetection, + Name: apicommon.DDDogstatsdOriginDetection, Value: strconv.FormatBool(*spec.Agent.Config.Dogstatsd.DogstatsdOriginDetection), }, ) @@ -917,11 +917,11 @@ func getEnvVarsForAgent(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) if isSystemProbeEnabled(&dda.Spec) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDSystemProbeSocketPath, + Name: apicommon.DDSystemProbeSocket, 
Value: filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "sysprobe.sock"), }) - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableTCPQueueLength, datadoghqv1alpha1.DDSystemProbeTCPQueueLengthEnabled, envVars) - envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableOOMKill, datadoghqv1alpha1.DDSystemProbeOOMKillEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableTCPQueueLength, apicommon.DDSystemProbeTCPQueueLengthEnabled, envVars) + envVars = addBoolPointerEnVar(dda.Spec.Agent.SystemProbe.EnableOOMKill, apicommon.DDSystemProbeOOMKillEnabled, envVars) } if isClusterAgentEnabled(dda.Spec.ClusterAgent) { @@ -956,7 +956,7 @@ func getEnvVarsForSecurityAgent(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.E envVars := []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDComplianceConfigEnabled, + Name: apicommon.DDComplianceConfigEnabled, Value: strconv.FormatBool(complianceEnabled), }, { @@ -968,38 +968,38 @@ func getEnvVarsForSecurityAgent(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.E if complianceEnabled { if dda.Spec.Agent.Security.Compliance.CheckInterval != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDComplianceConfigCheckInterval, + Name: apicommon.DDComplianceConfigCheckInterval, Value: strconv.FormatInt(dda.Spec.Agent.Security.Compliance.CheckInterval.Nanoseconds(), 10), }) } if dda.Spec.Agent.Security.Compliance.ConfigDir != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDComplianceConfigDir, + Name: apicommon.DDComplianceConfigDir, Value: datadoghqv1alpha1.SecurityAgentComplianceConfigDirVolumePath, }) } } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigEnabled, + Name: apicommon.DDRuntimeSecurityConfigEnabled, Value: strconv.FormatBool(runtimeEnabled), }) if runtimeEnabled { if dda.Spec.Agent.Security.Runtime.PoliciesDir != nil { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigPoliciesDir, + Name: apicommon.DDRuntimeSecurityConfigPoliciesDir, Value: datadoghqv1alpha1.SecurityAgentRuntimePoliciesDirVolumePath, }) } envVars = append(envVars, []corev1.EnvVar{ { - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigSocket, + Name: apicommon.DDRuntimeSecurityConfigSocket, Value: filepath.Join(datadoghqv1alpha1.SystemProbeSocketVolumePath, "runtime-security.sock"), }, { - Name: datadoghqv1alpha1.DDRuntimeSecurityConfigSyscallMonitorEnabled, + Name: apicommon.DDRuntimeSecurityConfigSyscallMonitorEnabled, Value: strconv.FormatBool(isSyscallMonitorEnabled(&dda.Spec)), }, }...) @@ -1019,12 +1019,12 @@ func getEnvVarsForSecurityAgent(dda *datadoghqv1alpha1.DatadogAgent) ([]corev1.E }, { Name: apicommon.DDClusterAgentKubeServiceName, - Value: component.GetClusterAgentServiceName(dda), + Value: componentdca.GetClusterAgentServiceName(dda), }, } clusterEnv = append(clusterEnv, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterAgentAuthToken, + Name: apicommon.DDClusterAgentAuthToken, ValueFrom: getClusterAgentAuthToken(dda), }) envVars = append(envVars, clusterEnv...) 
@@ -1854,7 +1854,7 @@ func getAgentServiceName(dda *datadoghqv1alpha1.DatadogAgent) string { // getAPIKeyFromSecret returns the Agent API key as an env var source func getAPIKeyFromSecret(dda *datadoghqv1alpha1.DatadogAgent) *corev1.EnvVarSource { - _, name, key := utils.GetAPIKeySecret(&dda.Spec.Credentials.DatadogCredentials, utils.GetDefaultCredentialsSecretName(dda)) + _, name, key := datadoghqv1alpha1.GetAPIKeySecret(&dda.Spec.Credentials.DatadogCredentials, datadoghqv1alpha1.GetDefaultCredentialsSecretName(dda)) return buildEnvVarFromSecret(name, key) } @@ -1865,7 +1865,7 @@ func getClusterAgentAuthToken(dda *datadoghqv1alpha1.DatadogAgent) *corev1.EnvVa // getAppKeyFromSecret returns the Agent API key as an env var source func getAppKeyFromSecret(dda *datadoghqv1alpha1.DatadogAgent) *corev1.EnvVarSource { - _, name, key := utils.GetAppKeySecret(&dda.Spec.Credentials.DatadogCredentials, utils.GetDefaultCredentialsSecretName(dda)) + _, name, key := datadoghqv1alpha1.GetAppKeySecret(&dda.Spec.Credentials.DatadogCredentials, datadoghqv1alpha1.GetDefaultCredentialsSecretName(dda)) return buildEnvVarFromSecret(name, key) } @@ -1912,10 +1912,6 @@ func getMetricsServerAPIServiceName() string { return "v1beta1.external.metrics.k8s.io" } -func getClusterAgentRbacResourcesName(dda *datadoghqv1alpha1.DatadogAgent) string { - return fmt.Sprintf("%s-%s", dda.Name, apicommon.DefaultClusterAgentResourceSuffix) -} - func getAgentRbacResourcesName(dda *datadoghqv1alpha1.DatadogAgent) string { return fmt.Sprintf("%s-%s", dda.Name, apicommon.DefaultAgentResourceSuffix) } @@ -1925,7 +1921,7 @@ func getClusterChecksRunnerRbacResourcesName(dda *datadoghqv1alpha1.DatadogAgent } func getHPAClusterRoleBindingName(dda *datadoghqv1alpha1.DatadogAgent) string { - return fmt.Sprintf(authDelegatorName, getClusterAgentRbacResourcesName(dda)) + return fmt.Sprintf(authDelegatorName, componentdca.GetClusterAgentRbacResourcesName(dda)) } func getExternalMetricsReaderClusterRoleName(dda *datadoghqv1alpha1.DatadogAgent, versionInfo *version.Info) string { @@ -1933,7 +1929,7 @@ func getExternalMetricsReaderClusterRoleName(dda *datadoghqv1alpha1.DatadogAgent // For GKE clusters the name of the role is hardcoded and cannot be changed - HPA controller expects this name return "external-metrics-reader" } - return fmt.Sprintf(externalMetricsReaderName, getClusterAgentRbacResourcesName(dda)) + return fmt.Sprintf(externalMetricsReaderName, componentdca.GetClusterAgentRbacResourcesName(dda)) } func getClusterChecksRunnerServiceAccount(dda *datadoghqv1alpha1.DatadogAgent) string { @@ -1990,12 +1986,12 @@ func prometheusScrapeEnvVars(logger logr.Logger, dda *datadoghqv1alpha1.DatadogA if apiutils.BoolValue(dda.Spec.Features.PrometheusScrape.Enabled) { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDPrometheusScrapeEnabled, + Name: apicommon.DDPrometheusScrapeEnabled, Value: apiutils.BoolToString(dda.Spec.Features.PrometheusScrape.Enabled), }) envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDPrometheusScrapeServiceEndpoints, + Name: apicommon.DDPrometheusScrapeServiceEndpoints, Value: apiutils.BoolToString(dda.Spec.Features.PrometheusScrape.ServiceEndpoints), }) @@ -2005,7 +2001,7 @@ func prometheusScrapeEnvVars(logger logr.Logger, dda *datadoghqv1alpha1.DatadogA logger.Error(err, "Invalid additional prometheus config, ignoring it") } else { envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDPrometheusScrapeChecks, + Name: apicommon.DDPrometheusScrapeChecks, Value: 
string(jsonValue), }) } @@ -2026,7 +2022,7 @@ func dsdMapperProfilesEnvVar(logger logr.Logger, dda *datadoghqv1alpha1.DatadogA return nil } return &corev1.EnvVar{ - Name: datadoghqv1alpha1.DDDogstatsdMapperProfiles, + Name: apicommon.DDDogstatsdMapperProfiles, Value: string(jsonValue), } } @@ -2036,7 +2032,7 @@ func dsdMapperProfilesEnvVar(logger logr.Logger, dda *datadoghqv1alpha1.DatadogA cmSelector.Name = dda.Spec.Agent.Config.Dogstatsd.MapperProfiles.ConfigMap.Name cmSelector.Key = dda.Spec.Agent.Config.Dogstatsd.MapperProfiles.ConfigMap.FileKey return &corev1.EnvVar{ - Name: datadoghqv1alpha1.DDDogstatsdMapperProfiles, + Name: apicommon.DDDogstatsdMapperProfiles, ValueFrom: &corev1.EnvVarSource{ConfigMapKeyRef: &cmSelector}, } } @@ -2289,12 +2285,12 @@ func envForClusterAgentConnection(dda *datadoghqv1alpha1.DatadogAgent) []corev1. }, { Name: apicommon.DDClusterAgentKubeServiceName, - Value: component.GetClusterAgentServiceName(dda), + Value: componentdca.GetClusterAgentServiceName(dda), }, } envVars = append(envVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDClusterAgentAuthToken, + Name: apicommon.DDClusterAgentAuthToken, ValueFrom: getClusterAgentAuthToken(dda), }) return envVars diff --git a/controllers/datadogagent/utils_kubelet.go b/controllers/datadogagent/utils_kubelet.go index bef927c4d..2b24a5c1a 100644 --- a/controllers/datadogagent/utils_kubelet.go +++ b/controllers/datadogagent/utils_kubelet.go @@ -29,14 +29,14 @@ func getKubeletEnvVars(dda *datadoghqv1alpha1.DatadogAgent) []corev1.EnvVar { } kubeletVars = append(kubeletVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDKubeletHost, + Name: apicommon.DDKubeletHost, ValueFrom: kubeletHostValueFrom, }) // TLS Verify if dda.Spec.Agent.Config.Kubelet != nil && dda.Spec.Agent.Config.Kubelet.TLSVerify != nil { kubeletVars = append(kubeletVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDKubeletTLSVerify, + Name: apicommon.DDKubeletTLSVerify, Value: apiutils.BoolToString(dda.Spec.Agent.Config.Kubelet.TLSVerify), }) } @@ -44,7 +44,7 @@ func getKubeletEnvVars(dda *datadoghqv1alpha1.DatadogAgent) []corev1.EnvVar { // CA Path if dda.Spec.Agent.Config.Kubelet != nil && (dda.Spec.Agent.Config.Kubelet.AgentCAPath != "" || dda.Spec.Agent.Config.Kubelet.HostCAPath != "") { kubeletVars = append(kubeletVars, corev1.EnvVar{ - Name: datadoghqv1alpha1.DDKubeletCAPath, + Name: apicommon.DDKubeletCAPath, Value: getAgentCAPath(dda), }) } diff --git a/controllers/datadogagent/utils_test.go b/controllers/datadogagent/utils_test.go index 39953a698..dc6ead22f 100644 --- a/controllers/datadogagent/utils_test.go +++ b/controllers/datadogagent/utils_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1/test" @@ -589,19 +590,19 @@ func Test_getEnvVarsForMetadataAsTags(t *testing.T) { }, want: []v1.EnvVar{ { - Name: datadoghqv1alpha1.DDNodeLabelsAsTags, + Name: apicommon.DDNodeLabelsAsTags, Value: singleMappingString, }, { - Name: datadoghqv1alpha1.DDPodLabelsAsTags, + Name: apicommon.DDPodLabelsAsTags, Value: singleMappingString, }, { - Name: datadoghqv1alpha1.DDPodAnnotationsAsTags, + Name: apicommon.DDPodAnnotationsAsTags, Value: singleMappingString, }, { - Name: datadoghqv1alpha1.DDNamespaceLabelsAsTags, + Name: apicommon.DDNamespaceLabelsAsTags, Value: singleMappingString, 
}, }, @@ -616,19 +617,19 @@ func Test_getEnvVarsForMetadataAsTags(t *testing.T) { }, want: []v1.EnvVar{ { - Name: datadoghqv1alpha1.DDNodeLabelsAsTags, + Name: apicommon.DDNodeLabelsAsTags, Value: multipleMappingString, }, { - Name: datadoghqv1alpha1.DDPodLabelsAsTags, + Name: apicommon.DDPodLabelsAsTags, Value: multipleMappingString, }, { - Name: datadoghqv1alpha1.DDPodAnnotationsAsTags, + Name: apicommon.DDPodAnnotationsAsTags, Value: multipleMappingString, }, { - Name: datadoghqv1alpha1.DDNamespaceLabelsAsTags, + Name: apicommon.DDNamespaceLabelsAsTags, Value: multipleMappingString, }, }, diff --git a/docs/how-to-contribute.md b/docs/how-to-contribute.md index 2f964f58e..d5a34e0e6 100644 --- a/docs/how-to-contribute.md +++ b/docs/how-to-contribute.md @@ -3,11 +3,13 @@ This project uses the `go module`. Be sure to have it activated with: `export GO111MODULE=on`. To list the available `make` commands, run: + ```shell make help ``` Some important commands: + ```shell $ make build CGO_ENABLED=0 go build -i -installsuffix cgo -ldflags '-w' -o controller ./cmd/manager/main.go @@ -29,3 +31,58 @@ make IMG=test/operator:tes deploy ``` Note: `IMG` currently defaults to: `datadog/datadog-operator:latest` + +## \[TMP\] How to test `v2alpha1` + +* Install `cert-manager`, which is needed for the webhook. + +```shell +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.yaml +``` + +* Deploy with `v2alpha1` enabled and configured as the storage version. + +```console +KUSTOMIZE_CONFIG=config/test-v2 make deploy +``` +### Deploy a basic `v2alpha1.DatadogAgent` resource + +The `examples/v2alpha1/min.yaml` file contains the minimum information needed in a DatadogAgent to start the deployment. + +```yaml +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + name: datadog +spec: + global: + credentials: + apiSecret: + secretName: datadog-secret + keyName: api-key + appSecret: + secretName: datadog-secret + keyName: app-key +``` + +Before deploying this resource, create a secret that contains an `api-key` and an `app-key`. By default, the Operator is installed in the +`system` namespace and only watches resources in this namespace, so the secret and the DatadogAgent must be created in the same namespace. + +```console +kubens system +``` + +```console +#!/bin/bash + +export KUBE_NAMESPACE=system +export DD_API_KEY= +export DD_APP_KEY= +export DD_TOKEN=<32-chars-token> + +kubectl -n $KUBE_NAMESPACE create secret generic datadog-secret --from-literal api-key=$DD_API_KEY --from-literal app-key=$DD_APP_KEY --from-literal token=$DD_TOKEN + +kubectl -n $KUBE_NAMESPACE apply -f examples/v2alpha1/min.yaml +``` + +The Operator should start deploying the `agent` and `cluster-agent`.
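Once the manifest is applied, a quick way to confirm that reconciliation started is to query the resources the Operator is expected to create. This is only a suggested check (a sketch, assuming the default `system` namespace and the `datadog` resource name used above):

```console
# The DatadogAgent custom resource should be present and reconciled.
kubectl -n $KUBE_NAMESPACE get datadogagent datadog

# The Operator should then roll out the cluster-agent Deployment and the agent DaemonSet.
kubectl -n $KUBE_NAMESPACE get deployment,daemonset,pods
```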
diff --git a/examples/v2alpha1/min.yaml b/examples/v2alpha1/min.yaml new file mode 100644 index 000000000..b15eb4cf5 --- /dev/null +++ b/examples/v2alpha1/min.yaml @@ -0,0 +1,13 @@ +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + name: datadog +spec: + global: + credentials: + apiSecret: + secretName: datadog-secret + keyName: api-key + appSecret: + secretName: datadog-secret + keyName: app-key diff --git a/go.mod b/go.mod index f14799c9c..bb28bd815 100644 --- a/go.mod +++ b/go.mod @@ -59,6 +59,7 @@ require ( github.com/go-logr/zapr v1.2.0 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/spec v0.19.2 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/go.sum b/go.sum index 068d75e34..44bc797a8 100644 --- a/go.sum +++ b/go.sum @@ -215,6 +215,7 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9 github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= diff --git a/pkg/controller/utils/datadog/metrics_forwarder.go b/pkg/controller/utils/datadog/metrics_forwarder.go index 746aef58f..f2a4da64b 100644 --- a/pkg/controller/utils/datadog/metrics_forwarder.go +++ b/pkg/controller/utils/datadog/metrics_forwarder.go @@ -14,10 +14,9 @@ import ( "sync" "time" - datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" + "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" apiutils "github.com/DataDog/datadog-operator/apis/utils" "github.com/DataDog/datadog-operator/pkg/config" - "github.com/DataDog/datadog-operator/pkg/controller/utils" "github.com/DataDog/datadog-operator/pkg/controller/utils/condition" "github.com/DataDog/datadog-operator/pkg/secrets" @@ -85,7 +84,7 @@ type metricsForwarder struct { decryptor secrets.Decryptor creds sync.Map baseURL string - status *datadoghqv1alpha1.DatadogAgentCondition + status *v1alpha1.DatadogAgentCondition credsManager *config.CredentialManager sync.Mutex } @@ -185,13 +184,13 @@ func (mf *metricsForwarder) stop() { close(mf.stopChan) } -func (mf *metricsForwarder) getStatus() *datadoghqv1alpha1.DatadogAgentCondition { +func (mf *metricsForwarder) getStatus() *v1alpha1.DatadogAgentCondition { mf.Lock() defer mf.Unlock() return mf.status } -func (mf *metricsForwarder) setStatus(newStatus *datadoghqv1alpha1.DatadogAgentCondition) { +func (mf *metricsForwarder) setStatus(newStatus *v1alpha1.DatadogAgentCondition) { mf.Lock() defer mf.Unlock() mf.status = newStatus @@ -373,7 +372,7 @@ func (mf *metricsForwarder) delegatedValidateCreds(apiKey, appKey string) (*api. 
// sendStatusMetrics forwards metrics for each component deployment (agent, clusteragent, clustercheck runner) // based on the status of DatadogAgent -func (mf *metricsForwarder) sendStatusMetrics(status *datadoghqv1alpha1.DatadogAgentStatus) error { +func (mf *metricsForwarder) sendStatusMetrics(status *v1alpha1.DatadogAgentStatus) error { if status == nil { return errors.New("nil status") } @@ -452,7 +451,7 @@ func (mf *metricsForwarder) delegatedSendDeploymentMetric(metricValue float64, c } // updateTags updates tags of the DatadogAgent -func (mf *metricsForwarder) updateTags(dda *datadoghqv1alpha1.DatadogAgent) { +func (mf *metricsForwarder) updateTags(dda *v1alpha1.DatadogAgent) { if dda == nil { mf.tags = []string{} return @@ -486,8 +485,8 @@ func hashKeys(apiKey, appKey string) uint64 { } // getDatadogAgent retrieves the DatadogAgent using Get client method -func (mf *metricsForwarder) getDatadogAgent() (*datadoghqv1alpha1.DatadogAgent, error) { - dda := &datadoghqv1alpha1.DatadogAgent{} +func (mf *metricsForwarder) getDatadogAgent() (*v1alpha1.DatadogAgent, error) { + dda := &v1alpha1.DatadogAgent{} err := mf.k8sClient.Get(context.TODO(), mf.namespacedName, dda) return dda, err @@ -495,7 +494,7 @@ func (mf *metricsForwarder) getDatadogAgent() (*datadoghqv1alpha1.DatadogAgent, // getCredentials returns the Datadog API Key and APP Key, it returns an error if one key is missing // getCredentials tries to get the credentials from the CRD, then from operator configuration -func (mf *metricsForwarder) getCredentials(dda *datadoghqv1alpha1.DatadogAgent) (string, string, error) { +func (mf *metricsForwarder) getCredentials(dda *v1alpha1.DatadogAgent) (string, string, error) { apiKey, appKey, err := mf.getCredsFromDatadogAgent(dda) if err != nil { if errors.Is(err, ErrEmptyAPIKey) || errors.Is(err, ErrEmptyAPPKey) { @@ -510,14 +509,14 @@ func (mf *metricsForwarder) getCredentials(dda *datadoghqv1alpha1.DatadogAgent) return apiKey, appKey, err } -func (mf *metricsForwarder) getCredsFromDatadogAgent(dda *datadoghqv1alpha1.DatadogAgent) (string, string, error) { +func (mf *metricsForwarder) getCredsFromDatadogAgent(dda *v1alpha1.DatadogAgent) (string, string, error) { var err error apiKey, appKey := "", "" if dda.Spec.Credentials.APIKey != "" { apiKey = dda.Spec.Credentials.APIKey } else { - _, secretName, secretKeyName := utils.GetAPIKeySecret(&dda.Spec.Credentials.DatadogCredentials, utils.GetDefaultCredentialsSecretName(dda)) + _, secretName, secretKeyName := v1alpha1.GetAPIKeySecret(&dda.Spec.Credentials.DatadogCredentials, v1alpha1.GetDefaultCredentialsSecretName(dda)) apiKey, err = mf.getKeyFromSecret(dda, secretName, secretKeyName) if err != nil { return "", "", err @@ -527,7 +526,7 @@ func (mf *metricsForwarder) getCredsFromDatadogAgent(dda *datadoghqv1alpha1.Data if dda.Spec.Credentials.AppKey != "" { appKey = dda.Spec.Credentials.AppKey } else { - _, secretName, secretKeyName := utils.GetAppKeySecret(&dda.Spec.Credentials.DatadogCredentials, utils.GetDefaultCredentialsSecretName(dda)) + _, secretName, secretKeyName := v1alpha1.GetAppKeySecret(&dda.Spec.Credentials.DatadogCredentials, v1alpha1.GetDefaultCredentialsSecretName(dda)) appKey, err = mf.getKeyFromSecret(dda, secretName, secretKeyName) if err != nil { return "", "", err @@ -603,7 +602,7 @@ func (mf *metricsForwarder) cleanSecretsCache() { } // getKeyFromSecret used to retrieve an api or app key from a secret object -func (mf *metricsForwarder) getKeyFromSecret(dda *datadoghqv1alpha1.DatadogAgent, secretName string, dataKey 
string) (string, error) { +func (mf *metricsForwarder) getKeyFromSecret(dda *v1alpha1.DatadogAgent, secretName string, dataKey string) (string, error) { secret := &corev1.Secret{} err := mf.k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: dda.Namespace, Name: secretName}, secret) if err != nil { @@ -624,10 +623,10 @@ func (mf *metricsForwarder) updateStatusIfNeeded(err error) { } if oldStatus := mf.getStatus(); oldStatus == nil { - newStatus := condition.NewDatadogAgentStatusCondition(datadoghqv1alpha1.DatadogMetricsActive, conditionStatus, now, "", description) + newStatus := condition.NewDatadogAgentStatusCondition(v1alpha1.DatadogMetricsActive, conditionStatus, now, "", description) mf.setStatus(&newStatus) } else { - mf.setStatus(condition.UpdateDatadogAgentStatusCondition(oldStatus, now, datadoghqv1alpha1.DatadogMetricsActive, conditionStatus, description)) + mf.setStatus(condition.UpdateDatadogAgentStatusCondition(oldStatus, now, v1alpha1.DatadogMetricsActive, conditionStatus, description)) } } @@ -686,7 +685,7 @@ func (mf *metricsForwarder) isEventChanFull() bool { return len(mf.eventChan) == cap(mf.eventChan) } -func getbaseURL(dda *datadoghqv1alpha1.DatadogAgent) string { +func getbaseURL(dda *v1alpha1.DatadogAgent) string { if apiutils.BoolValue(dda.Spec.Agent.Enabled) && dda.Spec.Agent.Config != nil && dda.Spec.Agent.Config.DDUrl != nil { return *dda.Spec.Agent.Config.DDUrl } else if dda.Spec.Site != "" { diff --git a/pkg/controller/utils/shared_utils.go b/pkg/controller/utils/shared_utils.go index 23d729315..38e8754e3 100644 --- a/pkg/controller/utils/shared_utils.go +++ b/pkg/controller/utils/shared_utils.go @@ -10,70 +10,20 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" - apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" - datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// GetDefaultCredentialsSecretName returns the default name for credentials secret -func GetDefaultCredentialsSecretName(dda *datadoghqv1alpha1.DatadogAgent) string { - return dda.Name -} - -// GetAPIKeySecret returns the API key secret name and the key inside the secret -// returns , secretName, secretKey -func GetAPIKeySecret(credentials *datadoghqv1alpha1.DatadogCredentials, defaultName string) (bool, string, string) { - if credentials.APISecret != nil { - if credentials.APISecret.KeyName != "" { - return true, credentials.APISecret.SecretName, credentials.APISecret.KeyName - } - - return true, credentials.APISecret.SecretName, apicommon.DefaultAPIKeyKey - } - - if credentials.APIKeyExistingSecret != "" { - return true, credentials.APIKeyExistingSecret, apicommon.DefaultAPIKeyKey - } - - if credentials.APIKey != "" { - return true, defaultName, apicommon.DefaultAPIKeyKey - } - - return false, defaultName, apicommon.DefaultAPIKeyKey -} - -// GetAppKeySecret returns the APP key secret name and the key inside the secret -// returns , secretName, secretKey -func GetAppKeySecret(credentials *datadoghqv1alpha1.DatadogCredentials, defaultName string) (bool, string, string) { - if credentials.APPSecret != nil { - if credentials.APPSecret.KeyName != "" { - return true, credentials.APPSecret.SecretName, credentials.APPSecret.KeyName - } - - return true, credentials.APPSecret.SecretName, apicommon.DefaultAPPKeyKey - } - - if credentials.AppKeyExistingSecret != "" { - return true, credentials.AppKeyExistingSecret, apicommon.DefaultAPPKeyKey - } - - if credentials.AppKey != "" { - return true, 
defaultName, apicommon.DefaultAPPKeyKey - } - - return false, defaultName, apicommon.DefaultAPPKeyKey -} - // ShouldReturn returns if we should stop the reconcile loop based on result func ShouldReturn(result reconcile.Result, err error) bool { return err != nil || result.Requeue || result.RequeueAfter > 0 } -// GetDatadogLeaderElectionResourceName returns the name of the ConfigMap used by the cluster agent to elect a leader -func GetDatadogLeaderElectionResourceName(ddaName string) string { - return fmt.Sprintf("%s-leader-election", ddaName) +// GetDatadogLeaderElectionResourceName returns the name of the resource managing the leader election token info. +func GetDatadogLeaderElectionResourceName(dda metav1.Object) string { + return fmt.Sprintf("%s-leader-election", dda.GetName()) } // GetDatadogTokenResourceName returns the name of the ConfigMap used by the cluster agent to store token -func GetDatadogTokenResourceName(ddaName string) string { - return fmt.Sprintf("%stoken", ddaName) +func GetDatadogTokenResourceName(dda metav1.Object) string { + return fmt.Sprintf("%stoken", dda.GetName()) }
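For reviewers, here is a minimal, self-contained sketch (not part of the change) showing how the helpers touched by this diff fit together: `object.SetOwnerReference` and `object.CheckOwnerReference` from the new `controllers/datadogagent/object/owner_ref.go`, plus `utils.GetDatadogLeaderElectionResourceName`, which now accepts any `metav1.Object` instead of a raw name. The `ConfigMap` owner, the literal UID, and the object names below are placeholders for illustration; in the operator the owner is the `DatadogAgent` resource.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"

	"github.com/DataDog/datadog-operator/controllers/datadogagent/object"
	"github.com/DataDog/datadog-operator/pkg/controller/utils"
)

func main() {
	// Placeholder owner: any type registered in the scheme works here;
	// in the operator the owner is the DatadogAgent custom resource.
	owner := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "datadog", Namespace: "system", UID: "0000-1111"},
	}
	child := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "datadog-secret", Namespace: "system"},
	}

	// Add (or update) the controller owner reference on the dependent object.
	if err := object.SetOwnerReference(owner, child, scheme.Scheme); err != nil {
		panic(err)
	}

	// CheckOwnerReference reports whether owner controls child.
	fmt.Println(object.CheckOwnerReference(owner, child)) // true

	// The shared helper now derives the name from any metav1.Object.
	fmt.Println(utils.GetDatadogLeaderElectionResourceName(owner)) // datadog-leader-election
}
```

Accepting `metav1.Object` in the shared helpers means both `v1alpha1` and `v2alpha1` DatadogAgent objects can be passed without additional plumbing.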